Commit 66c8ea8b86 — pulled dev and resolved conflicts
277 changed files with 12725 additions and 4476 deletions
@@ -37,11 +37,13 @@ pg_port=5432
pg_user=postgres
pg_timeout=30
pg_minconn=45
PG_RETRY_MAX=50
PG_RETRY_INTERVAL=2
put_S3_TTL=20
sentryURL=
sessions_bucket=mobs
sessions_region=us-east-1
sourcemaps_bucket=sourcemaps
sourcemaps_reader=http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps
sourcemaps_reader=http://127.0.0.1:9000/
stage=default-foss
version_number=1.4.0
@@ -5,6 +5,15 @@ WORKDIR /work
COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env
ENV APP_NAME chalice
# Installing Nodejs
RUN apt update && apt install -y curl && \
    curl -fsSL https://deb.nodesource.com/setup_12.x | bash - && \
    apt install -y nodejs && \
    apt remove --purge -y curl && \
    rm -rf /var/lib/apt/lists/* && \
    cd sourcemap-reader && \
    npm install

# Add Tini
# Startup daemon
@@ -6,6 +6,7 @@ COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env && mv app_alerts.py app.py
ENV pg_minconn 2
ENV APP_NAME alerts

# Add Tini
# Startup daemon
@@ -9,12 +9,11 @@ from starlette.responses import StreamingResponse
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from routers import core, core_dynamic
from routers.app import v1_api
from routers.crons import core_crons
from routers.crons import core_dynamic_crons
from routers.subs import dashboard
from routers.subs import dashboard, insights, metrics, v1_api

app = FastAPI()
app = FastAPI(root_path="/api")


@app.middleware('http')

@@ -54,7 +53,8 @@ app.include_router(core_dynamic.public_app)
app.include_router(core_dynamic.app)
app.include_router(core_dynamic.app_apikey)
app.include_router(dashboard.app)
# app.include_router(insights.app)
app.include_router(metrics.app)
app.include_router(insights.app)
app.include_router(v1_api.app_apikey)

Schedule = AsyncIOScheduler()
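For context on the app = FastAPI(root_path="/api") change above: root_path only tells the generated OpenAPI schema and docs that a reverse proxy strips an /api prefix; it does not change how routes are matched inside the application. A minimal, hedged illustration (the /health route is made up, not part of this commit):

    from fastapi import FastAPI

    app = FastAPI(root_path="/api")  # a proxy is expected to forward /api/* here with the prefix stripped

    @app.get("/health")
    async def health():
        # matched as /health inside the container; reachable as /api/health through the proxy
        return {"ok": True}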
api/auth/auth_project.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from fastapi import Request
from starlette import status
from starlette.exceptions import HTTPException

import schemas
from chalicelib.core import projects
from or_dependencies import OR_context


class ProjectAuthorizer:
    def __init__(self, project_identifier):
        self.project_identifier: str = project_identifier

    async def __call__(self, request: Request) -> None:
        if len(request.path_params.keys()) == 0 or request.path_params.get(self.project_identifier) is None:
            return
        current_user: schemas.CurrentContext = await OR_context(request)
        project_identifier = request.path_params[self.project_identifier]
        if (self.project_identifier == "projectId" \
                and projects.get_project(project_id=project_identifier, tenant_id=current_user.tenant_id) is None) \
                or (self.project_identifier.lower() == "projectKey" \
                    and projects.get_internal_project_id(project_key=project_identifier) is None):
            print("project not found")
            raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="project not found.")
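A hedged sketch of how a dependency class like ProjectAuthorizer is typically attached to a router. The import path, route path and handler below are illustrative, and it assumes the OR_context middleware has already put the current user on the request:

    from fastapi import APIRouter, Depends

    from auth.auth_project import ProjectAuthorizer  # assumed import path

    # every route under this router gets the 404-on-unknown-project check for {projectId}
    router = APIRouter(dependencies=[Depends(ProjectAuthorizer("projectId"))])

    @router.get("/{projectId}/example")
    async def example(projectId: int):
        # reaching this point means the authorizer found the project for the caller's tenant
        return {"projectId": projectId}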
@@ -18,6 +18,8 @@ check_prereq() {
}

function build_api(){
    cp -R ../utilities/utils ../sourcemap-reader/.
    cp -R ../sourcemap-reader .
    tag=""
    # Copy enterprise code
    [[ $1 == "ee" ]] && {
@@ -42,7 +42,7 @@ def generate_jwt(id, tenant_id, iat, aud):
    payload={
        "userId": id,
        "tenantId": tenant_id,
        "exp": iat // 1000 + config("jwt_exp_delta_seconds",cast=int) + TimeUTC.get_utc_offset() // 1000,
        "exp": iat // 1000 + config("jwt_exp_delta_seconds", cast=int) + TimeUTC.get_utc_offset() // 1000,
        "iss": config("jwt_issuer"),
        "iat": iat // 1000,
        "aud": aud
@@ -5,39 +5,38 @@ from chalicelib.core import users


def get_state(tenant_id):
    my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
    pids = [s["projectId"] for s in my_projects]
    pids = projects.get_projects_ids(tenant_id=tenant_id)
    with pg_client.PostgresClient() as cur:
        recorded = False
        meta = False

        if len(pids) > 0:
            cur.execute(
                cur.mogrify("""\
                        SELECT
                        COUNT(*)
                        FROM public.sessions AS s
                        where s.project_id IN %(ids)s
                        LIMIT 1;""",
                cur.mogrify("""SELECT EXISTS(( SELECT 1
                        FROM public.sessions AS s
                        WHERE s.project_id IN %(ids)s)) AS exists;""",
                            {"ids": tuple(pids)})
            )
            recorded = cur.fetchone()["count"] > 0
            recorded = cur.fetchone()["exists"]
            meta = False
            if recorded:
                cur.execute("""SELECT SUM((SELECT COUNT(t.meta)
                        FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
                                     (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
                                     (sessions.user_id)) AS t(meta)
                        WHERE t.meta NOTNULL))
                        FROM public.projects AS p
                            LEFT JOIN LATERAL ( SELECT 'defined'
                                                FROM public.sessions
                                                WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
                                                LIMIT 1) AS sessions(user_id) ON(TRUE)
                        WHERE p.deleted_at ISNULL;"""
                            )
                cur.execute("""SELECT EXISTS((SELECT 1
                        FROM public.projects AS p
                            LEFT JOIN LATERAL ( SELECT 1
                                                FROM public.sessions
                                                WHERE sessions.project_id = p.project_id
                                                  AND sessions.user_id IS NOT NULL
                                                LIMIT 1) AS sessions(user_id) ON (TRUE)
                        WHERE p.deleted_at ISNULL
                          AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                              OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                              OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                              OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                              OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                              OR p.metadata_10 IS NOT NULL )
                        )) AS exists;""")

                meta = cur.fetchone()["sum"] > 0
                meta = cur.fetchone()["exists"]

    return [
        {"task": "Install OpenReplay",
@@ -58,22 +57,18 @@ def get_state(tenant_id):


def get_state_installing(tenant_id):
    my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
    pids = [s["projectId"] for s in my_projects]
    pids = projects.get_projects_ids(tenant_id=tenant_id)
    with pg_client.PostgresClient() as cur:
        recorded = False

        if len(pids) > 0:
            cur.execute(
                cur.mogrify("""\
                        SELECT
                        COUNT(*)
                        FROM public.sessions AS s
                        where s.project_id IN %(ids)s
                        LIMIT 1;""",
                cur.mogrify("""SELECT EXISTS(( SELECT 1
                        FROM public.sessions AS s
                        WHERE s.project_id IN %(ids)s)) AS exists;""",
                            {"ids": tuple(pids)})
            )
            recorded = cur.fetchone()["count"] > 0
            recorded = cur.fetchone()["exists"]

    return {"task": "Install OpenReplay",
            "done": recorded,
@@ -82,20 +77,23 @@ def get_state_installing(tenant_id):

def get_state_identify_users(tenant_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            """SELECT SUM((SELECT COUNT(t.meta)
                    FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
                                 (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
                                 (sessions.user_id)) AS t(meta)
                    WHERE t.meta NOTNULL))
                    FROM public.projects AS p
                        LEFT JOIN LATERAL ( SELECT 'defined'
                                            FROM public.sessions
                                            WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
                                            LIMIT 1) AS sessions(user_id) ON(TRUE)
                    WHERE p.deleted_at ISNULL;""")
        cur.execute("""SELECT EXISTS((SELECT 1
                    FROM public.projects AS p
                        LEFT JOIN LATERAL ( SELECT 1
                                            FROM public.sessions
                                            WHERE sessions.project_id = p.project_id
                                              AND sessions.user_id IS NOT NULL
                                            LIMIT 1) AS sessions(user_id) ON (TRUE)
                    WHERE p.deleted_at ISNULL
                      AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                            OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                            OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                            OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                            OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                            OR p.metadata_10 IS NOT NULL )
                    )) AS exists;""")

        meta = cur.fetchone()["sum"] > 0
        meta = cur.fetchone()["exists"]

    return {"task": "Identify Users",
            "done": meta,
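The boarding checks above replace a COUNT(*) probe with SELECT EXISTS(...), which lets Postgres stop at the first matching row instead of counting them all. A minimal sketch of the same pattern with plain psycopg2 (connection handling and project ids are illustrative; the repo's pg_client wrapper is not assumed here):

    import psycopg2
    from psycopg2.extras import RealDictCursor

    # conn = psycopg2.connect(...)  # supplied by the caller

    def has_recorded_sessions(conn, project_ids):
        with conn.cursor(cursor_factory=RealDictCursor) as cur:
            cur.execute(
                "SELECT EXISTS(SELECT 1 FROM public.sessions WHERE project_id IN %(ids)s) AS exists;",
                {"ids": tuple(project_ids)})   # psycopg2 renders the tuple as (1, 2, ...)
            return cur.fetchone()["exists"]    # True as soon as one row matches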
@@ -9,11 +9,11 @@ from chalicelib.utils.TimeUTC import TimeUTC
PIE_CHART_GROUP = 5


def __try_live(project_id, data: schemas.CreateCustomMetricsSchema):
def __try_live(project_id, data: schemas.TryCustomMetricsPayloadSchema):
    results = []
    for i, s in enumerate(data.series):
        s.filter.startDate = data.startDate
        s.filter.endDate = data.endDate
        s.filter.startDate = data.startTimestamp
        s.filter.endDate = data.endTimestamp
        results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
                                               view_type=data.view_type, metric_type=data.metric_type,
                                               metric_of=data.metric_of, metric_value=data.metric_value))

@@ -42,7 +42,7 @@ def __try_live(project_id, data: schemas.CreateCustomMetricsSchema):
    return results


def merged_live(project_id, data: schemas.CreateCustomMetricsSchema):
def merged_live(project_id, data: schemas.TryCustomMetricsPayloadSchema):
    series_charts = __try_live(project_id=project_id, data=data)
    if data.view_type == schemas.MetricTimeseriesViewType.progress or data.metric_type == schemas.MetricType.table:
        return series_charts
@@ -54,13 +54,9 @@ def merged_live(project_id, data: schemas.CreateCustomMetricsSchema):
    return results


def __get_merged_metric(project_id, user_id, metric_id,
                        data: Union[schemas.CustomMetricChartPayloadSchema,
                                    schemas.CustomMetricSessionsPayloadSchema]) \
def __merge_metric_with_data(metric, data: Union[schemas.CustomMetricChartPayloadSchema,
                                                 schemas.CustomMetricSessionsPayloadSchema]) \
        -> Union[schemas.CreateCustomMetricsSchema, None]:
    metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if metric is None:
        return None
    metric: schemas.CreateCustomMetricsSchema = schemas.CreateCustomMetricsSchema.parse_obj({**data.dict(), **metric})
    if len(data.filters) > 0 or len(data.events) > 0:
        for s in metric.series:

@@ -71,11 +67,12 @@ def __get_merged_metric(project_id, user_id, metric_id,
    return metric


def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPayloadSchema):
    metric: schemas.CreateCustomMetricsSchema = __get_merged_metric(project_id=project_id, user_id=user_id,
                                                                    metric_id=metric_id, data=data)
def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPayloadSchema, metric=None):
    if metric is None:
        metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if metric is None:
        return None
    metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
    series_charts = __try_live(project_id=project_id, data=metric)
    if metric.view_type == schemas.MetricTimeseriesViewType.progress or metric.metric_type == schemas.MetricType.table:
        return series_charts
@@ -88,21 +85,23 @@ def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPa


def get_sessions(project_id, user_id, metric_id, data: schemas.CustomMetricSessionsPayloadSchema):
    metric: schemas.CreateCustomMetricsSchema = __get_merged_metric(project_id=project_id, user_id=user_id,
                                                                    metric_id=metric_id, data=data)
    metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
    if metric is None:
        return None
    metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
    if metric is None:
        return None
    results = []
    for s in metric.series:
        s.filter.startDate = data.startDate
        s.filter.endDate = data.endDate
        s.filter.startDate = data.startTimestamp
        s.filter.endDate = data.endTimestamp
        results.append({"seriesId": s.series_id, "seriesName": s.name,
                        **sessions.search2_pg(data=s.filter, project_id=project_id, user_id=user_id)})

    return results


def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema):
def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema, dashboard=False):
    with pg_client.PostgresClient() as cur:
        _data = {}
        for i, s in enumerate(data.series):

@@ -129,6 +128,8 @@ def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema):
            query
        )
        r = cur.fetchone()
        if dashboard:
            return r["metric_id"]
    return {"data": get(metric_id=r["metric_id"], project_id=project_id, user_id=user_id)}
@@ -147,10 +148,11 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
                   "metric_value": data.metric_value, "metric_format": data.metric_format}
        for i, s in enumerate(data.series):
            prefix = "u_"
            if s.index is None:
                s.index = i
            if s.series_id is None or s.series_id not in series_ids:
                n_series.append({"i": i, "s": s})
                prefix = "n_"
                s.index = i
            else:
                u_series.append({"i": i, "s": s})
                u_series_ids.append(s.series_id)
@@ -192,40 +194,60 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
                        SET name = %(name)s, is_public= %(is_public)s,
                            view_type= %(view_type)s, metric_type= %(metric_type)s,
                            metric_of= %(metric_of)s, metric_value= %(metric_value)s,
                            metric_format= %(metric_format)s
                            metric_format= %(metric_format)s,
                            edited_at = timezone('utc'::text, now())
                        WHERE metric_id = %(metric_id)s
                          AND project_id = %(project_id)s
                          AND (user_id = %(user_id)s OR is_public)
                        RETURNING metric_id;""", params)
        cur.execute(
            query
        )
        cur.execute(query)
    return get(metric_id=metric_id, project_id=project_id, user_id=user_id)


def get_all(project_id, user_id):
def get_all(project_id, user_id, include_series=False):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(
                """SELECT *
                   FROM metrics
                       LEFT JOIN LATERAL (SELECT jsonb_agg(metric_series.* ORDER BY index) AS series
        sub_join = ""
        if include_series:
            sub_join = """LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
                                             FROM metric_series
                                             WHERE metric_series.metric_id = metrics.metric_id
                                               AND metric_series.deleted_at ISNULL
                                            ) AS metric_series ON (TRUE)
                                            ) AS metric_series ON (TRUE)"""
        cur.execute(
            cur.mogrify(
                f"""SELECT *
                    FROM metrics
                        {sub_join}
                        LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
                                           FROM (SELECT DISTINCT dashboard_id, name, is_public
                                                 FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
                                                 WHERE deleted_at ISNULL
                                                   AND dashboard_widgets.metric_id = metrics.metric_id
                                                   AND project_id = %(project_id)s
                                                   AND ((dashboards.user_id = %(user_id)s OR is_public))) AS connected_dashboards
                                          ) AS connected_dashboards ON (TRUE)
                        LEFT JOIN LATERAL (SELECT email AS owner_email
                                           FROM users
                                           WHERE deleted_at ISNULL
                                             AND users.user_id = metrics.user_id
                                          ) AS owner ON (TRUE)
                    WHERE metrics.project_id = %(project_id)s
                      AND metrics.deleted_at ISNULL
                      AND (user_id = %(user_id)s OR is_public)
                    ORDER BY created_at;""",
                      AND (user_id = %(user_id)s OR metrics.is_public)
                    ORDER BY metrics.edited_at, metrics.created_at;""",
                {"project_id": project_id, "user_id": user_id}
            )
        )
        rows = cur.fetchall()
        for r in rows:
            r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
            for s in r["series"]:
                s["filter"] = helper.old_search_payload_to_flat(s["filter"])
        if include_series:
            for r in rows:
                # r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
                for s in r["series"]:
                    s["filter"] = helper.old_search_payload_to_flat(s["filter"])
        else:
            for r in rows:
                r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
                r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
        rows = helper.list_to_camel_case(rows)
    return rows
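Most queries in this file follow the same mogrify-then-execute pattern seen in get_all above: mogrify performs the safe parameter interpolation up front and returns the final statement, which is then passed to execute. A small illustration with plain psycopg2 (the DSN, table and parameter values are made up):

    import psycopg2

    conn = psycopg2.connect("dbname=openreplay")        # illustrative DSN
    with conn.cursor() as cur:
        query = cur.mogrify(
            """SELECT metric_id, name
               FROM metrics
               WHERE project_id = %(project_id)s
                 AND deleted_at ISNULL;""",
            {"project_id": 1})
        # mogrify returns the fully-quoted statement (as bytes), so it can be logged or reused
        cur.execute(query)
        rows = cur.fetchall()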
@@ -235,7 +257,7 @@ def delete(project_id, metric_id, user_id):
        cur.execute(
            cur.mogrify("""\
                        UPDATE public.metrics
                        SET deleted_at = timezone('utc'::text, now())
                        SET deleted_at = timezone('utc'::text, now()), edited_at = timezone('utc'::text, now())
                        WHERE project_id = %(project_id)s
                          AND metric_id = %(metric_id)s
                          AND (user_id = %(user_id)s OR is_public);""",
@@ -256,6 +278,18 @@ def get(metric_id, project_id, user_id, flatten=True):
                                           WHERE metric_series.metric_id = metrics.metric_id
                                             AND metric_series.deleted_at ISNULL
                                          ) AS metric_series ON (TRUE)
                        LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
                                           FROM (SELECT dashboard_id, name, is_public
                                                 FROM dashboards
                                                 WHERE deleted_at ISNULL
                                                   AND project_id = %(project_id)s
                                                   AND ((user_id = %(user_id)s OR is_public))) AS connected_dashboards
                                          ) AS connected_dashboards ON (TRUE)
                        LEFT JOIN LATERAL (SELECT email AS owner_email
                                           FROM users
                                           WHERE deleted_at ISNULL
                                             AND users.user_id = metrics.user_id
                                          ) AS owner ON (TRUE)
                    WHERE metrics.project_id = %(project_id)s
                      AND metrics.deleted_at ISNULL
                      AND (metrics.user_id = %(user_id)s OR metrics.is_public)
@@ -268,12 +302,46 @@ def get(metric_id, project_id, user_id, flatten=True):
        if row is None:
            return None
        row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
        row["edited_at"] = TimeUTC.datetime_to_timestamp(row["edited_at"])
        if flatten:
            for s in row["series"]:
                s["filter"] = helper.old_search_payload_to_flat(s["filter"])
    return helper.dict_to_camel_case(row)


def get_with_template(metric_id, project_id, user_id, include_dashboard=True):
    with pg_client.PostgresClient() as cur:
        sub_query = ""
        if include_dashboard:
            sub_query = """LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
                                              FROM (SELECT dashboard_id, name, is_public
                                                    FROM dashboards
                                                    WHERE deleted_at ISNULL
                                                      AND project_id = %(project_id)s
                                                      AND ((user_id = %(user_id)s OR is_public))) AS connected_dashboards
                                             ) AS connected_dashboards ON (TRUE)"""
        cur.execute(
            cur.mogrify(
                f"""SELECT *
                    FROM metrics
                        LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
                                           FROM metric_series
                                           WHERE metric_series.metric_id = metrics.metric_id
                                             AND metric_series.deleted_at ISNULL
                                          ) AS metric_series ON (TRUE)
                        {sub_query}
                    WHERE (metrics.project_id = %(project_id)s OR metrics.project_id ISNULL)
                      AND metrics.deleted_at ISNULL
                      AND (metrics.user_id = %(user_id)s OR metrics.is_public)
                      AND metrics.metric_id = %(metric_id)s
                    ORDER BY created_at;""",
                {"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
            )
        )
        row = cur.fetchone()
    return helper.dict_to_camel_case(row)


def get_series_for_alert(project_id, user_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
File diff suppressed because it is too large

api/chalicelib/core/dashboards2.py (new file, 309 lines)
@@ -0,0 +1,309 @@
import json

import schemas
from chalicelib.core import custom_metrics, dashboard
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC

CATEGORY_DESCRIPTION = {
    'overview': 'lorem ipsum',
    'custom': 'lorem cusipsum',
    'errors': 'lorem erripsum',
    'performance': 'lorem perfipsum',
    'resources': 'lorem resipsum'
}


def get_templates(project_id, user_id):
    with pg_client.PostgresClient() as cur:
        pg_query = cur.mogrify(f"""SELECT category, jsonb_agg(metrics ORDER BY name) AS widgets
                                   FROM (SELECT * , default_config AS config
                                         FROM metrics LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index), '[]'::jsonb) AS series
                                                                         FROM metric_series
                                                                         WHERE metric_series.metric_id = metrics.metric_id
                                                                           AND metric_series.deleted_at ISNULL
                                                                        ) AS metric_series ON (TRUE)
                                         WHERE deleted_at IS NULL
                                           AND (project_id ISNULL OR (project_id = %(project_id)s AND (is_public OR user_id= %(userId)s)))
                                        ) AS metrics
                                   GROUP BY category
                                   ORDER BY category;""", {"project_id": project_id, "userId": user_id})
        cur.execute(pg_query)
        rows = cur.fetchall()
        for r in rows:
            r["description"] = CATEGORY_DESCRIPTION.get(r["category"], "")
            for w in r["widgets"]:
                w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
                w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
    return helper.list_to_camel_case(rows)

def create_dashboard(project_id, user_id, data: schemas.CreateDashboardSchema):
    with pg_client.PostgresClient() as cur:
        pg_query = f"""INSERT INTO dashboards(project_id, user_id, name, is_public, is_pinned)
                       VALUES(%(projectId)s, %(userId)s, %(name)s, %(is_public)s, %(is_pinned)s)
                       RETURNING *"""
        params = {"userId": user_id, "projectId": project_id, **data.dict()}
        if data.metrics is not None and len(data.metrics) > 0:
            pg_query = f"""WITH dash AS ({pg_query})
                           INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
                           VALUES {",".join([f"((SELECT dashboard_id FROM dash),%(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])}
                           RETURNING (SELECT dashboard_id FROM dash)"""
            for i, m in enumerate(data.metrics):
                params[f"metric_id_{i}"] = m
                # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \
                #     .get("properties", {}).get("config", {}).get("default", {})
                # params[f"config_{i}"]["position"] = i
                # params[f"config_{i}"] = json.dumps(params[f"config_{i}"])
                params[f"config_{i}"] = json.dumps({"position": i})
        cur.execute(cur.mogrify(pg_query, params))
        row = cur.fetchone()
        if row is None:
            return {"errors": ["something went wrong while creating the dashboard"]}
    return {"data": get_dashboard(project_id=project_id, user_id=user_id, dashboard_id=row["dashboard_id"])}


def get_dashboards(project_id, user_id):
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT *
                       FROM dashboards
                       WHERE deleted_at ISNULL
                         AND project_id = %(projectId)s
                         AND (user_id = %(userId)s OR is_public);"""
        params = {"userId": user_id, "projectId": project_id}
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
    return helper.list_to_camel_case(rows)

def get_dashboard(project_id, user_id, dashboard_id):
    with pg_client.PostgresClient() as cur:
        pg_query = """SELECT dashboards.*, all_metric_widgets.widgets AS widgets
                      FROM dashboards
                          LEFT JOIN LATERAL (SELECT COALESCE(JSONB_AGG(raw_metrics), '[]') AS widgets
                                             FROM (SELECT dashboard_widgets.*, metrics.*, metric_series.series
                                                   FROM metrics
                                                       INNER JOIN dashboard_widgets USING (metric_id)
                                                       LEFT JOIN LATERAL (SELECT JSONB_AGG(metric_series.* ORDER BY index) AS series
                                                                          FROM metric_series
                                                                          WHERE metric_series.metric_id = metrics.metric_id
                                                                            AND metric_series.deleted_at ISNULL
                                                                         ) AS metric_series ON (TRUE)
                                                   WHERE dashboard_widgets.dashboard_id = dashboards.dashboard_id
                                                     AND metrics.deleted_at ISNULL
                                                     AND (metrics.project_id = %(projectId)s OR metrics.project_id ISNULL)) AS raw_metrics
                                            ) AS all_metric_widgets ON (TRUE)
                      WHERE dashboards.deleted_at ISNULL
                        AND dashboards.project_id = %(projectId)s
                        AND dashboard_id = %(dashboard_id)s
                        AND (dashboards.user_id = %(userId)s OR is_public);"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id}
        cur.execute(cur.mogrify(pg_query, params))
        row = cur.fetchone()
        if row is not None:
            for w in row["widgets"]:
                row["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
                row["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
    return helper.dict_to_camel_case(row)


def delete_dashboard(project_id, user_id, dashboard_id):
    with pg_client.PostgresClient() as cur:
        pg_query = """UPDATE dashboards
                      SET deleted_at = timezone('utc'::text, now())
                      WHERE dashboards.project_id = %(projectId)s
                        AND dashboard_id = %(dashboard_id)s
                        AND (dashboards.user_id = %(userId)s OR is_public);"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id}
        cur.execute(cur.mogrify(pg_query, params))
    return {"data": {"success": True}}

def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashboardSchema):
    with pg_client.PostgresClient() as cur:
        pg_query = f"""UPDATE dashboards
                       SET name = %(name)s
                           {", is_public = %(is_public)s" if data.is_public is not None else ""}
                           {", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""}
                       WHERE dashboards.project_id = %(projectId)s
                         AND dashboard_id = %(dashboard_id)s
                         AND (dashboards.user_id = %(userId)s OR is_public)"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, **data.dict()}
        if data.metrics is not None and len(data.metrics) > 0:
            pg_query = f"""WITH dash AS ({pg_query})
                           INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
                           VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])};"""
            for i, m in enumerate(data.metrics):
                params[f"metric_id_{i}"] = m
                # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \
                #     .get("properties", {}).get("config", {}).get("default", {})
                # params[f"config_{i}"]["position"] = i
                # params[f"config_{i}"] = json.dumps(params[f"config_{i}"])
                params[f"config_{i}"] = json.dumps({"position": i})

        cur.execute(cur.mogrify(pg_query, params))

    return get_dashboard(project_id=project_id, user_id=user_id, dashboard_id=dashboard_id)

def get_widget(project_id, user_id, dashboard_id, widget_id):
    with pg_client.PostgresClient() as cur:
        pg_query = """SELECT metrics.*, metric_series.series
                      FROM dashboard_widgets
                          INNER JOIN dashboards USING (dashboard_id)
                          INNER JOIN metrics USING (metric_id)
                          LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index), '[]'::jsonb) AS series
                                             FROM metric_series
                                             WHERE metric_series.metric_id = metrics.metric_id
                                               AND metric_series.deleted_at ISNULL
                                            ) AS metric_series ON (TRUE)
                      WHERE dashboard_id = %(dashboard_id)s
                        AND widget_id = %(widget_id)s
                        AND (dashboards.is_public OR dashboards.user_id = %(userId)s)
                        AND dashboards.deleted_at IS NULL
                        AND metrics.deleted_at ISNULL
                        AND (metrics.project_id = %(projectId)s OR metrics.project_id ISNULL)
                        AND (metrics.is_public OR metrics.user_id = %(userId)s);"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, "widget_id": widget_id}
        cur.execute(cur.mogrify(pg_query, params))
        row = cur.fetchone()
    return helper.dict_to_camel_case(row)


def add_widget(project_id, user_id, dashboard_id, data: schemas.AddWidgetToDashboardPayloadSchema):
    with pg_client.PostgresClient() as cur:
        pg_query = """INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
                      SELECT %(dashboard_id)s AS dashboard_id, %(metric_id)s AS metric_id,
                             %(userId)s AS user_id, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id)s)||%(config)s::jsonb AS config
                      WHERE EXISTS(SELECT 1 FROM dashboards
                                   WHERE dashboards.deleted_at ISNULL AND dashboards.project_id = %(projectId)s
                                     AND dashboard_id = %(dashboard_id)s
                                     AND (dashboards.user_id = %(userId)s OR is_public))
                      RETURNING *;"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, **data.dict()}
        params["config"] = json.dumps(data.config)
        cur.execute(cur.mogrify(pg_query, params))
        row = cur.fetchone()
    return helper.dict_to_camel_case(row)


def update_widget(project_id, user_id, dashboard_id, widget_id, data: schemas.UpdateWidgetPayloadSchema):
    with pg_client.PostgresClient() as cur:
        pg_query = """UPDATE dashboard_widgets
                      SET config= %(config)s
                      WHERE dashboard_id=%(dashboard_id)s AND widget_id=%(widget_id)s
                      RETURNING *;"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id,
                  "widget_id": widget_id, **data.dict()}
        params["config"] = json.dumps(data.config)
        cur.execute(cur.mogrify(pg_query, params))
        row = cur.fetchone()
    return helper.dict_to_camel_case(row)


def remove_widget(project_id, user_id, dashboard_id, widget_id):
    with pg_client.PostgresClient() as cur:
        pg_query = """DELETE FROM dashboard_widgets
                      WHERE dashboard_id=%(dashboard_id)s AND widget_id=%(widget_id)s;"""
        params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, "widget_id": widget_id}
        cur.execute(cur.mogrify(pg_query, params))
    return {"data": {"success": True}}

def pin_dashboard(project_id, user_id, dashboard_id):
    with pg_client.PostgresClient() as cur:
        pg_query = """UPDATE dashboards
                      SET is_pinned = FALSE
                      WHERE project_id=%(project_id)s;
                      UPDATE dashboards
                      SET is_pinned = True
                      WHERE dashboard_id=%(dashboard_id)s AND project_id=%(project_id)s AND deleted_at ISNULL
                      RETURNING *;"""
        params = {"userId": user_id, "project_id": project_id, "dashboard_id": dashboard_id}
        cur.execute(cur.mogrify(pg_query, params))
        row = cur.fetchone()
    return helper.dict_to_camel_case(row)


def create_metric_add_widget(project_id, user_id, dashboard_id, data: schemas.CreateCustomMetricsSchema):
    metric_id = custom_metrics.create(project_id=project_id, user_id=user_id, data=data, dashboard=True)
    return add_widget(project_id=project_id, user_id=user_id, dashboard_id=dashboard_id,
                      data=schemas.AddWidgetToDashboardPayloadSchema(metricId=metric_id))

PREDEFINED = {schemas.TemplatePredefinedKeys.count_sessions: dashboard.get_processed_sessions,
              schemas.TemplatePredefinedKeys.avg_image_load_time: dashboard.get_application_activity_avg_image_load_time,
              schemas.TemplatePredefinedKeys.avg_page_load_time: dashboard.get_application_activity_avg_page_load_time,
              schemas.TemplatePredefinedKeys.avg_request_load_time: dashboard.get_application_activity_avg_request_load_time,
              schemas.TemplatePredefinedKeys.avg_dom_content_load_start: dashboard.get_page_metrics_avg_dom_content_load_start,
              schemas.TemplatePredefinedKeys.avg_first_contentful_pixel: dashboard.get_page_metrics_avg_first_contentful_pixel,
              schemas.TemplatePredefinedKeys.avg_visited_pages: dashboard.get_user_activity_avg_visited_pages,
              schemas.TemplatePredefinedKeys.avg_session_duration: dashboard.get_user_activity_avg_session_duration,
              schemas.TemplatePredefinedKeys.avg_pages_dom_buildtime: dashboard.get_pages_dom_build_time,
              schemas.TemplatePredefinedKeys.avg_pages_response_time: dashboard.get_pages_response_time,
              schemas.TemplatePredefinedKeys.avg_response_time: dashboard.get_top_metrics_avg_response_time,
              schemas.TemplatePredefinedKeys.avg_first_paint: dashboard.get_top_metrics_avg_first_paint,
              schemas.TemplatePredefinedKeys.avg_dom_content_loaded: dashboard.get_top_metrics_avg_dom_content_loaded,
              schemas.TemplatePredefinedKeys.avg_till_first_bit: dashboard.get_top_metrics_avg_till_first_bit,
              schemas.TemplatePredefinedKeys.avg_time_to_interactive: dashboard.get_top_metrics_avg_time_to_interactive,
              schemas.TemplatePredefinedKeys.count_requests: dashboard.get_top_metrics_count_requests,
              schemas.TemplatePredefinedKeys.avg_time_to_render: dashboard.get_time_to_render,
              schemas.TemplatePredefinedKeys.avg_used_js_heap_size: dashboard.get_memory_consumption,
              schemas.TemplatePredefinedKeys.avg_cpu: dashboard.get_avg_cpu,
              schemas.TemplatePredefinedKeys.avg_fps: dashboard.get_avg_fps,
              schemas.TemplatePredefinedKeys.impacted_sessions_by_js_errors: dashboard.get_impacted_sessions_by_js_errors,
              schemas.TemplatePredefinedKeys.domains_errors_4xx: dashboard.get_domains_errors_4xx,
              schemas.TemplatePredefinedKeys.domains_errors_5xx: dashboard.get_domains_errors_5xx,
              schemas.TemplatePredefinedKeys.errors_per_domains: dashboard.get_errors_per_domains,
              schemas.TemplatePredefinedKeys.calls_errors: dashboard.get_calls_errors,
              schemas.TemplatePredefinedKeys.errors_by_type: dashboard.get_errors_per_type,
              schemas.TemplatePredefinedKeys.errors_by_origin: dashboard.get_resources_by_party,
              schemas.TemplatePredefinedKeys.speed_index_by_location: dashboard.get_speed_index_location,
              schemas.TemplatePredefinedKeys.slowest_domains: dashboard.get_slowest_domains,
              schemas.TemplatePredefinedKeys.sessions_per_browser: dashboard.get_sessions_per_browser,
              schemas.TemplatePredefinedKeys.time_to_render: dashboard.get_time_to_render,
              schemas.TemplatePredefinedKeys.impacted_sessions_by_slow_pages: dashboard.get_impacted_sessions_by_slow_pages,
              schemas.TemplatePredefinedKeys.memory_consumption: dashboard.get_memory_consumption,
              schemas.TemplatePredefinedKeys.cpu_load: dashboard.get_avg_cpu,
              schemas.TemplatePredefinedKeys.frame_rate: dashboard.get_avg_fps,
              schemas.TemplatePredefinedKeys.crashes: dashboard.get_crashes,
              schemas.TemplatePredefinedKeys.resources_vs_visually_complete: dashboard.get_resources_vs_visually_complete,
              schemas.TemplatePredefinedKeys.pages_dom_buildtime: dashboard.get_pages_dom_build_time,
              schemas.TemplatePredefinedKeys.pages_response_time: dashboard.get_pages_response_time,
              schemas.TemplatePredefinedKeys.pages_response_time_distribution: dashboard.get_pages_response_time_distribution,
              schemas.TemplatePredefinedKeys.missing_resources: dashboard.get_missing_resources_trend,
              schemas.TemplatePredefinedKeys.slowest_resources: dashboard.get_slowest_resources,
              schemas.TemplatePredefinedKeys.resources_fetch_time: dashboard.get_resources_loading_time,
              schemas.TemplatePredefinedKeys.resource_type_vs_response_end: dashboard.resource_type_vs_response_end,
              schemas.TemplatePredefinedKeys.resources_count_by_type: dashboard.get_resources_count_by_type,
              }

def get_predefined_metric(key: schemas.TemplatePredefinedKeys, project_id: int, data: dict):
    return PREDEFINED.get(key, lambda *args: None)(project_id=project_id, **data)


def make_chart_metrics(project_id, user_id, metric_id, data: schemas.CustomMetricChartPayloadSchema):
    raw_metric = custom_metrics.get_with_template(metric_id=metric_id, project_id=project_id, user_id=user_id,
                                                  include_dashboard=False)
    if raw_metric is None:
        return None
    metric = schemas.CustomMetricAndTemplate = schemas.CustomMetricAndTemplate.parse_obj(raw_metric)
    if metric.is_template:
        return get_predefined_metric(key=metric.predefined_key, project_id=project_id, data=data.dict())
    else:
        return custom_metrics.make_chart(project_id=project_id, user_id=user_id, metric_id=metric_id, data=data,
                                         metric=raw_metric)


def make_chart_widget(dashboard_id, project_id, user_id, widget_id, data: schemas.CustomMetricChartPayloadSchema):
    raw_metric = get_widget(widget_id=widget_id, project_id=project_id, user_id=user_id, dashboard_id=dashboard_id)
    if raw_metric is None:
        return None
    metric = schemas.CustomMetricAndTemplate = schemas.CustomMetricAndTemplate.parse_obj(raw_metric)
    if metric.is_template:
        return get_predefined_metric(key=metric.predefined_key, project_id=project_id, data=data.dict())
    else:
        return custom_metrics.make_chart(project_id=project_id, user_id=user_id, metric_id=raw_metric["metricId"],
                                         data=data, metric=raw_metric)
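A hedged usage sketch of the new dashboards2 helpers. The schema field names (name, isPublic, metricId, dashboardId, and the ids used below) are assumptions based on the code above and pydantic camelCase aliases, so treat this as an illustration rather than the exact API:

    import schemas
    from chalicelib.core import dashboards2

    # create a dashboard seeded with two existing saved metrics (ids are illustrative)
    payload = schemas.CreateDashboardSchema(name="Web vitals", isPublic=True, metrics=[12, 15])
    result = dashboards2.create_dashboard(project_id=1, user_id=7, data=payload)
    dashboard_id = result["data"]["dashboardId"]

    # later, attach one more saved metric as a widget on the same dashboard
    dashboards2.add_widget(project_id=1, user_id=7, dashboard_id=dashboard_id,
                           data=schemas.AddWidgetToDashboardPayloadSchema(metricId=33))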
@@ -1,11 +1,8 @@
import schemas
from chalicelib.core import sessions_metas
from chalicelib.core.dashboard import __get_constraints, __get_constraint_values
from chalicelib.utils import helper, dev
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size
import math
from chalicelib.core.dashboard import __get_constraints, __get_constraint_values


def __transform_journey(rows):
@@ -930,4 +927,4 @@ def search(text, feature_type, project_id, platform=None):
        rows = cur.fetchall()
    else:
        return []
    return [helper.dict_to_camel_case(row) for row in rows]
    return [helper.dict_to_camel_case(row) for row in rows]
@@ -15,10 +15,17 @@ class JIRAIntegration(integration_base.BaseIntegration):
        # TODO: enable super-constructor when OAuth is done
        # super(JIRAIntegration, self).__init__(jwt, user_id, JIRACloudIntegrationProxy)
        self._user_id = user_id
        i = self.get()
        if i is None:
        self.integration = self.get()
        if self.integration is None:
            return
        self.issue_handler = JIRACloudIntegrationIssue(token=i["token"], username=i["username"], url=i["url"])
        self.integration["valid"] = True
        try:
            self.issue_handler = JIRACloudIntegrationIssue(token=self.integration["token"],
                                                           username=self.integration["username"],
                                                           url=self.integration["url"])
        except Exception as e:
            self.issue_handler = None
            self.integration["valid"] = False

    @property
    def provider(self):

@@ -37,10 +44,10 @@ class JIRAIntegration(integration_base.BaseIntegration):
            return helper.dict_to_camel_case(cur.fetchone())

    def get_obfuscated(self):
        integration = self.get()
        if integration is None:
        if self.integration is None:
            return None
        integration["token"] = obfuscate_string(integration["token"])
        integration = dict(self.integration)
        integration["token"] = obfuscate_string(self.integration["token"])
        integration["provider"] = self.provider.lower()
        return integration
@@ -90,14 +97,13 @@ class JIRAIntegration(integration_base.BaseIntegration):
        return {"state": "success"}

    def add_edit(self, data):
        s = self.get()
        if s is not None:
        if self.integration is not None:
            return self.update(
                changes={
                    "username": data["username"],
                    "token": data["token"] \
                        if data.get("token") and len(data["token"]) > 0 and data["token"].find("***") == -1 \
                        else s["token"],
                        else self.integration["token"],
                    "url": data["url"]
                },
                obfuscate=True
@@ -36,7 +36,10 @@ def get_integration(tenant_id, user_id, tool=None):
    if tool not in SUPPORTED_TOOLS:
        return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
    if tool == integration_jira_cloud.PROVIDER:
        return None, integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
        integration = integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
        if integration.integration is not None and not integration.integration.get("valid", True):
            return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
        return None, integration
    elif tool == integration_github.PROVIDER:
        return None, integration_github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
    return {"errors": ["lost integration"]}, None
@@ -57,7 +57,7 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st

        cur.execute(f"""\
                SELECT
                       s.project_id, s.name, s.project_key
                       s.project_id, s.name, s.project_key, s.save_request_payloads
                       {',s.gdpr' if gdpr else ''}
                       {',COALESCE((SELECT TRUE FROM public.sessions WHERE sessions.project_id = s.project_id LIMIT 1), FALSE) AS recorded' if recorded else ''}
                       {',stack_integrations.count>0 AS stack_integrations' if stack_integrations else ''}
@@ -65,27 +65,26 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
                FROM public.projects AS s
                     {'LEFT JOIN LATERAL (SELECT COUNT(*) AS count FROM public.integrations WHERE s.project_id = integrations.project_id LIMIT 1) AS stack_integrations ON TRUE' if stack_integrations else ''}
                WHERE s.deleted_at IS NULL
                ORDER BY s.project_id;"""
                )
                ORDER BY s.project_id;""")
        rows = cur.fetchall()
        if recording_state:
            project_ids = [f'({r["project_id"]})' for r in rows]
            query = f"""SELECT projects.project_id, COALESCE(MAX(start_ts), 0) AS last
                        FROM (VALUES {",".join(project_ids)}) AS projects(project_id)
                            LEFT JOIN sessions USING (project_id)
                        GROUP BY project_id;"""
            cur.execute(
                query=query
            )
            query = cur.mogrify(f"""SELECT projects.project_id, COALESCE(MAX(start_ts), 0) AS last
                                    FROM (VALUES {",".join(project_ids)}) AS projects(project_id)
                                        LEFT JOIN sessions USING (project_id)
                                    WHERE sessions.start_ts >= %(startDate)s AND sessions.start_ts <= %(endDate)s
                                    GROUP BY project_id;""",
                                {"startDate": TimeUTC.now(delta_days=-3), "endDate": TimeUTC.now(delta_days=1)})

            cur.execute(query=query)
            status = cur.fetchall()
            for r in rows:
                r["status"] = "red"
                for s in status:
                    if s["project_id"] == r["project_id"]:
                        if s["last"] < TimeUTC.now(-2):
                            r["status"] = "red"
                        elif s["last"] < TimeUTC.now(-1):
                        if TimeUTC.now(-2) <= s["last"] < TimeUTC.now(-1):
                            r["status"] = "yellow"
                        else:
                        elif s["last"] >= TimeUTC.now(-1):
                            r["status"] = "green"
                        break
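The reworked status loop above maps the latest session timestamp to a colour. Restated as a tiny helper (illustrative only, assuming TimeUTC.now(-1) means one day ago in milliseconds):

    def project_status(last_session_ts, one_day_ago, two_days_ago):
        # green: a session within the last day; yellow: between one and two days ago; red: older or never
        if last_session_ts >= one_day_ago:
            return "green"
        if two_days_ago <= last_session_ts < one_day_ago:
            return "yellow"
        return "red"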
@@ -109,7 +108,8 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr=
                SELECT
                       s.project_id,
                       s.project_key,
                       s.name
                       s.name,
                       s.save_request_payloads
                       {",(SELECT max(ss.start_ts) FROM public.sessions AS ss WHERE ss.project_id = %(project_id)s) AS last_recorded_session_at" if include_last_session else ""}
                       {',s.gdpr' if include_gdpr else ''}
                       {tracker_query}
@@ -244,7 +244,8 @@ def get_project_key(project_id):
                        where project_id =%(project_id)s AND deleted_at ISNULL;""",
                        {"project_id": project_id})
        )
        return cur.fetchone()["project_key"]
        project = cur.fetchone()
        return project["project_key"] if project is not None else None


def get_capture_status(project_id):
@@ -280,3 +281,13 @@ def update_capture_status(project_id, changes):
    )

    return changes


def get_projects_ids(tenant_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(f"""SELECT s.project_id
                        FROM public.projects AS s
                        WHERE s.deleted_at IS NULL
                        ORDER BY s.project_id;""")
        rows = cur.fetchall()
    return [r["project_id"] for r in rows]
@@ -1,23 +1,23 @@
from chalicelib.utils import helper, pg_client


def get_by_session_id(session_id):
def get_by_session_id(session_id, project_id):
    with pg_client.PostgresClient() as cur:
        ch_query = """\
                SELECT
                    timestamp AS datetime,
                    url,
                    type,
                    duration,
                    resources.duration AS duration,
                    ttfb,
                    header_size,
                    encoded_body_size,
                    decoded_body_size,
                    success,
                    COALESCE(status, CASE WHEN success THEN 200 END) AS status
                FROM events.resources
                WHERE session_id = %(session_id)s;"""
        params = {"session_id": session_id}
                FROM events.resources INNER JOIN sessions USING (session_id)
                WHERE session_id = %(session_id)s AND project_id= %(project_id)s;"""
        params = {"session_id": session_id, "project_id": project_id}
        cur.execute(cur.mogrify(ch_query, params))
        rows = cur.fetchall()
    return helper.list_to_camel_case(rows)
@@ -39,7 +39,8 @@ def __group_metadata(session, project_metadata):
    return meta


def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_viewed=False, group_metadata=False):
def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_viewed=False, group_metadata=False,
                  live=True):
    with pg_client.PostgresClient() as cur:
        extra_query = []
        if include_fav_viewed:
@@ -93,13 +94,13 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_
                data['userEvents'] = events.get_customs_by_sessionId2_pg(project_id=project_id,
                                                                         session_id=session_id)
                data['mobsUrl'] = sessions_mobs.get_web(sessionId=session_id)
                data['resources'] = resources.get_by_session_id(session_id=session_id)
                data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id)

            data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
            data['issues'] = issues.get_by_session_id(session_id=session_id)
            data['live'] = assist.is_live(project_id=project_id,
                                          session_id=session_id,
                                          project_key=data["projectKey"])
            data['live'] = live and assist.is_live(project_id=project_id,
                                                   session_id=session_id,
                                                   project_key=data["projectKey"])
            data["inDB"] = True
            return data
        else:
@@ -233,20 +234,19 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
            data.order = "DESC"
        sort = 'session_id'
        if data.sort is not None and data.sort != "session_id":
            sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
        else:
            sort = 'session_id'
            # sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
            sort = helper.key_to_snake_case(data.sort)

        meta_keys = metadata.get(project_id=project_id)
        main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
                                            COALESCE(JSONB_AGG(full_sessions)
                                                     FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
                                     FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY issue_score DESC, {sort} {data.order}, session_id desc) AS rn
                                     FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY {sort} {data.order}, issue_score DESC) AS rn
                                           FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
                                                 {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
                                                 {query_part}
                                                 ORDER BY s.session_id desc) AS filtred_sessions
                                           ORDER BY issue_score DESC, {sort} {data.order}) AS full_sessions;""",
                                           ORDER BY {sort} {data.order}, issue_score DESC) AS full_sessions;""",
                                 full_args)
        # print("--------------------")
        # print(main_query)
@@ -280,9 +280,9 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
        for i, s in enumerate(sessions):
            sessions[i]["metadata"] = {k["key"]: sessions[i][f'metadata_{k["index"]}'] for k in meta_keys \
                                       if sessions[i][f'metadata_{k["index"]}'] is not None}
    if not data.group_by_user and data.sort is not None and data.sort != "session_id":
        sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data.sort)],
                          reverse=data.order.upper() == "DESC")
    # if not data.group_by_user and data.sort is not None and data.sort != "session_id":
    #     sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data.sort)],
    #                       reverse=data.order.upper() == "DESC")
    return {
        'total': total,
        'sessions': helper.list_to_camel_case(sessions)
@@ -30,7 +30,7 @@ def compute():
                       RETURNING *,(SELECT email FROM public.users WHERE role='owner' LIMIT 1);"""
        )
        data = cur.fetchone()
        requests.post('https://parrot.asayer.io/os/telemetry', json={"stats": [process_data(data)]})
        requests.post('https://api.openreplay.com/os/telemetry', json={"stats": [process_data(data)]})


def new_client():

@@ -40,4 +40,4 @@ def new_client():
                       (SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
                       FROM public.tenants;""")
        data = cur.fetchone()
        requests.post('https://parrot.asayer.io/os/signup', json=process_data(data))
        requests.post('https://api.openreplay.com/os/signup', json=process_data(data))
@@ -571,7 +571,6 @@ def auth_exists(user_id, tenant_id, jwt_iat, jwt_aud):
    )


@dev.timed
def authenticate(email, password, for_change_password=False, for_plugin=False):
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
@@ -88,13 +88,18 @@ class TimeUTC:
        return datetime.utcfromtimestamp(ts // 1000).strftime(fmt)

    @staticmethod
    def human_to_timestamp(ts, pattern):
    def human_to_timestamp(ts, pattern="%Y-%m-%dT%H:%M:%S.%f"):
        return int(datetime.strptime(ts, pattern).timestamp() * 1000)

    @staticmethod
    def datetime_to_timestamp(date):
        if date is None:
            return None
        if isinstance(date, str):
            fp = date.find(".")
            if fp > 0:
                date += '0' * (6 - len(date[fp + 1:]))
            date = datetime.fromisoformat(date)
        return int(datetime.timestamp(date) * 1000)

    @staticmethod
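The string branch in datetime_to_timestamp pads the fractional-seconds part because, on the Python versions this code targets, datetime.fromisoformat rejects fraction lengths other than 3 or 6 digits, while timestamps coming back from the database may arrive trimmed. Illustrative values:

    from datetime import datetime

    date = "2021-07-01T10:20:30.5"                 # trimmed fractional seconds
    fp = date.find(".")
    if fp > 0:
        date += '0' * (6 - len(date[fp + 1:]))     # -> "2021-07-01T10:20:30.500000"
    ts_ms = int(datetime.fromisoformat(date).timestamp() * 1000)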
@@ -5,22 +5,24 @@ import requests
from jira import JIRA
from jira.exceptions import JIRAError
from requests.auth import HTTPBasicAuth
from starlette import status
from starlette.exceptions import HTTPException

fields = "id, summary, description, creator, reporter, created, assignee, status, updated, comment, issuetype, labels"


class JiraManager:
    # retries = 5
    retries = 0

    def __init__(self, url, username, password, project_id=None):
        self._config = {"JIRA_PROJECT_ID": project_id, "JIRA_URL": url, "JIRA_USERNAME": username,
                        "JIRA_PASSWORD": password}
        try:
            self._jira = JIRA({'server': url}, basic_auth=(username, password), logging=True, max_retries=1)
            self._jira = JIRA(url, basic_auth=(username, password), logging=True, max_retries=1)
        except Exception as e:
            print("!!! JIRA AUTH ERROR")
            print(e)
            raise e

    def set_jira_project_id(self, project_id):
        self._config["JIRA_PROJECT_ID"] = project_id
@@ -33,8 +35,8 @@ class JiraManager:
            if (e.status_code // 100) == 4 and self.retries > 0:
                time.sleep(1)
                return self.get_projects()
            print(f"=>Error {e.text}")
            raise e
            print(f"=>Exception {e.text}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
        projects_dict_list = []
        for project in projects:
            projects_dict_list.append(self.__parser_project_info(project))

@@ -49,8 +51,8 @@ class JiraManager:
            if (e.status_code // 100) == 4 and self.retries > 0:
                time.sleep(1)
                return self.get_project()
            print(f"=>Error {e.text}")
            raise e
            print(f"=>Exception {e.text}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
        return self.__parser_project_info(project)

    def get_issues(self, sql: str, offset: int = 0):

@@ -65,8 +67,8 @@ class JiraManager:
            if (e.status_code // 100) == 4 and self.retries > 0:
                time.sleep(1)
                return self.get_issues(sql, offset)
            print(f"=>Error {e.text}")
            raise e
            print(f"=>Exception {e.text}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")

        issue_dict_list = []
        for issue in issues:

@@ -85,8 +87,8 @@ class JiraManager:
            if (e.status_code // 100) == 4 and self.retries > 0:
                time.sleep(1)
                return self.get_issue(issue_id)
            print(f"=>Error {e.text}")
            raise e
            print(f"=>Exception {e.text}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
        return self.__parser_issue_info(issue)

    def get_issue_v3(self, issue_id: str):
@@ -105,8 +107,8 @@ class JiraManager:
            if self.retries > 0:
                time.sleep(1)
                return self.get_issue_v3(issue_id)
            print(f"=>Error {e}")
            raise e
            print(f"=>Exception {e}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: get issue error")
        return self.__parser_issue_info(issue.json())

    def create_issue(self, issue_dict):

@@ -119,8 +121,8 @@ class JiraManager:
            if (e.status_code // 100) == 4 and self.retries > 0:
                time.sleep(1)
                return self.create_issue(issue_dict)
            print(f"=>Error {e.text}")
            raise e
            print(f"=>Exception {e.text}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")

    def close_issue(self, issue):
        try:

@@ -131,8 +133,8 @@ class JiraManager:
            if (e.status_code // 100) == 4 and self.retries > 0:
                time.sleep(1)
                return self.close_issue(issue)
            print(f"=>Error {e.text}")
            raise e
            print(f"=>Exception {e.text}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")

    def assign_issue(self, issue_id, account_id) -> bool:
        try:

@@ -142,8 +144,8 @@ class JiraManager:
            if (e.status_code // 100) == 4 and self.retries > 0:
                time.sleep(1)
                return self.assign_issue(issue_id, account_id)
            print(f"=>Error {e.text}")
            raise e
            print(f"=>Exception {e.text}")
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")

    def add_comment(self, issue_id: str, comment: str):
        try:
@ -153,8 +155,8 @@ class JiraManager:
|
|||
if (e.status_code // 100) == 4 and self.retries > 0:
|
||||
time.sleep(1)
|
||||
return self.add_comment(issue_id, comment)
|
||||
print(f"=>Error {e.text}")
|
||||
raise e
|
||||
print(f"=>Exception {e.text}")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
|
||||
return self.__parser_comment_info(comment)
|
||||
|
||||
def add_comment_v3(self, issue_id: str, comment: str):
|
||||
|
|
@ -190,8 +192,8 @@ class JiraManager:
|
|||
if self.retries > 0:
|
||||
time.sleep(1)
|
||||
return self.add_comment_v3(issue_id, comment)
|
||||
print(f"=>Error {e}")
|
||||
raise e
|
||||
print(f"=>Exception {e}")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: comment error")
|
||||
return self.__parser_comment_info(comment_response.json())
|
||||
|
||||
def get_comments(self, issueKey):
|
||||
|
|
@ -206,8 +208,8 @@ class JiraManager:
|
|||
if (e.status_code // 100) == 4 and self.retries > 0:
|
||||
time.sleep(1)
|
||||
return self.get_comments(issueKey)
|
||||
print(f"=>Error {e.text}")
|
||||
raise e
|
||||
print(f"=>Exception {e.text}")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
|
||||
|
||||
def get_meta(self):
|
||||
meta = {}
|
||||
|
|
@ -217,14 +219,16 @@ class JiraManager:
|
|||
|
||||
def get_assignable_users(self):
|
||||
try:
|
||||
users = self._jira.search_assignable_users_for_issues('', project=self._config['JIRA_PROJECT_ID'])
|
||||
users = self._jira.search_assignable_users_for_issues(project=self._config['JIRA_PROJECT_ID'], query="*")
|
||||
except JIRAError as e:
|
||||
self.retries -= 1
|
||||
if (e.status_code // 100) == 4 and self.retries > 0:
|
||||
time.sleep(1)
|
||||
return self.get_assignable_users()
|
||||
print(f"=>Error {e.text}")
|
||||
raise e
|
||||
print(f"=>Exception {e.text}")
|
||||
if e.status_code == 401:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="JIRA: 401 Unauthorized")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
|
||||
users_dict = []
|
||||
for user in users:
|
||||
users_dict.append({
|
||||
|
|
@ -244,8 +248,8 @@ class JiraManager:
|
|||
if (e.status_code // 100) == 4 and self.retries > 0:
|
||||
time.sleep(1)
|
||||
return self.get_issue_types()
|
||||
print(f"=>Error {e.text}")
|
||||
raise e
|
||||
print(f"=>Exception {e.text}")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
|
||||
types_dict = []
|
||||
for type in types:
|
||||
if not type.subtask and not type.name.lower() == "epic":
|
||||
|
|
|
|||
|
|
@@ -1,3 +1,4 @@
import time
from threading import Semaphore

import psycopg2

@@ -9,7 +10,8 @@ _PG_CONFIG = {"host": config("pg_host"),
              "database": config("pg_dbname"),
              "user": config("pg_user"),
              "password": config("pg_password"),
              "port": config("pg_port", cast=int)}
              "port": config("pg_port", cast=int),
              "application_name": config("APP_NAME", default="PY")}
PG_CONFIG = dict(_PG_CONFIG)
if config("pg_timeout", cast=int, default=0) > 0:
    PG_CONFIG["options"] = f"-c statement_timeout={config('pg_timeout', cast=int) * 1000}"

@@ -36,9 +38,14 @@ class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):

postgreSQL_pool: ORThreadedConnectionPool = None

RETRY_MAX = config("PG_RETRY_MAX", cast=int, default=50)
RETRY_INTERVAL = config("PG_RETRY_INTERVAL", cast=int, default=2)
RETRY = 0


def make_pool():
    global postgreSQL_pool
    global RETRY
    if postgreSQL_pool is not None:
        try:
            postgreSQL_pool.closeall()

@@ -50,7 +57,13 @@ def make_pool():
        print("Connection pool created successfully")
    except (Exception, psycopg2.DatabaseError) as error:
        print("Error while connecting to PostgreSQL", error)
        raise error
        if RETRY < RETRY_MAX:
            RETRY += 1
            print(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
            time.sleep(RETRY_INTERVAL)
            make_pool()
        else:
            raise error


make_pool()

@@ -64,6 +77,8 @@ class PostgresClient:
    def __init__(self, long_query=False):
        self.long_query = long_query
        if long_query:
            long_config = dict(_PG_CONFIG)
            long_config["application_name"] += "-LONG"
            self.connection = psycopg2.connect(**_PG_CONFIG)
        else:
            self.connection = postgreSQL_pool.getconn()

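For context, the retry behaviour added above can also be read as a plain loop. The sketch below is illustrative only and not taken from this diff; `connect` stands in for the pool-creation call, and the defaults mirror the `PG_RETRY_MAX`/`PG_RETRY_INTERVAL` values used above.

```python
import time


def connect_with_retry(connect, retry_max=50, retry_interval=2):
    # Try the connection factory repeatedly, sleeping between attempts;
    # re-raise the last error once the retry budget is exhausted.
    last_error = None
    for attempt in range(1, retry_max + 1):
        try:
            return connect()
        except Exception as error:  # psycopg2.DatabaseError would land here too
            last_error = error
            print(f"waiting for {retry_interval}s before retry n°{attempt}")
            time.sleep(retry_interval)
    raise last_error
```
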
@@ -1,2 +1,5 @@
#!/bin/bash
cd sourcemap-reader
nohup npm start &> /tmp/sourcemap-reader.log &
cd ..
uvicorn app:app --host 0.0.0.0 --reload

@@ -4,11 +4,11 @@ boto3==1.16.1
pyjwt==1.7.1
psycopg2-binary==2.8.6
elasticsearch==7.9.1
jira==2.0.0
jira==3.1.1



fastapi==0.74.1
fastapi==0.75.0
uvicorn[standard]==0.17.5
python-decouple==3.6
pydantic[email]==1.8.2

@@ -2,11 +2,13 @@ from fastapi import APIRouter, Depends

from auth.auth_apikey import APIKeyAuth
from auth.auth_jwt import JWTAuth
from auth.auth_project import ProjectAuthorizer
from or_dependencies import ORRoute


def get_routers() -> (APIRouter, APIRouter, APIRouter):
    public_app = APIRouter(route_class=ORRoute)
    app = APIRouter(dependencies=[Depends(JWTAuth())], route_class=ORRoute)
    app_apikey = APIRouter(dependencies=[Depends(APIKeyAuth())], route_class=ORRoute)
    app = APIRouter(dependencies=[Depends(JWTAuth()), Depends(ProjectAuthorizer("projectId"))], route_class=ORRoute)
    app_apikey = APIRouter(dependencies=[Depends(APIKeyAuth()), Depends(ProjectAuthorizer("projectKey"))],
                           route_class=ORRoute)
    return public_app, app, app_apikey

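As a hedged illustration of what attaching `Depends(...)` at router construction does (not taken from this diff): every route registered on that router runs the dependency before the endpoint body, so a per-router check can reject bad path parameters in one place. The `require_project` checker and route below are hypothetical.

```python
from fastapi import APIRouter, Depends, HTTPException, Request


async def require_project(request: Request) -> None:
    # Hypothetical checker: refuse requests whose projectId path parameter is unusable.
    project_id = request.path_params.get("projectId")
    if project_id is not None and not str(project_id).isdigit():
        raise HTTPException(status_code=404, detail="project not found")


router = APIRouter(dependencies=[Depends(require_project)])


@router.get("/{projectId}/ping")
def ping(projectId: int):
    # Reached only if require_project did not raise.
    return {"data": "pong"}
```
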
@@ -1,7 +1,7 @@
from typing import Union

from decouple import config
from fastapi import Depends, Body
from fastapi import Depends, Body, BackgroundTasks

import schemas
from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \

@@ -21,8 +21,10 @@ from routers.base import get_routers
public_app, app, app_apikey = get_routers()


@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
@app.get('/{projectId}/sessions2/{sessionId}', tags=["sessions"])
def get_session2(projectId: int, sessionId: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)):
def get_session2(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
                 context: schemas.CurrentContext = Depends(OR_context)):
    if isinstance(sessionId, str):
        return {"errors": ["session not found"]}
    data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True, user_id=context.user_id,

@@ -30,12 +32,14 @@ def get_session2(projectId: int, sessionId: Union[int, str], context: schemas.Cu
    if data is None:
        return {"errors": ["session not found"]}
    if data.get("inDB"):
        sessions_favorite_viewed.view_session(project_id=projectId, user_id=context.user_id, session_id=sessionId)
        background_tasks.add_task(sessions_favorite_viewed.view_session, project_id=projectId, user_id=context.user_id,
                                  session_id=sessionId)
    return {
        'data': data
    }


@app.get('/{projectId}/sessions/{sessionId}/favorite', tags=["sessions"])
@app.get('/{projectId}/sessions2/{sessionId}/favorite', tags=["sessions"])
def add_remove_favorite_session2(projectId: int, sessionId: int,
                                 context: schemas.CurrentContext = Depends(OR_context)):

@@ -44,6 +48,7 @@ def add_remove_favorite_session2(projectId: int, sessionId: int,
                                                     session_id=sessionId)}


@app.get('/{projectId}/sessions/{sessionId}/assign', tags=["sessions"])
@app.get('/{projectId}/sessions2/{sessionId}/assign', tags=["sessions"])
def assign_session(projectId: int, sessionId, context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.get_by_session(project_id=projectId, session_id=sessionId,

@@ -56,6 +61,7 @@ def assign_session(projectId: int, sessionId, context: schemas.CurrentContext =
    }


@app.get('/{projectId}/sessions/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
@app.get('/{projectId}/sessions2/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
def get_error_trace(projectId: int, sessionId: int, errorId: str,
                    context: schemas.CurrentContext = Depends(OR_context)):

@@ -67,6 +73,7 @@ def get_error_trace(projectId: int, sessionId: int, errorId: str,
    }


@app.get('/{projectId}/sessions/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
@app.get('/{projectId}/sessions2/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
def assign_session(projectId: int, sessionId: int, issueId: str,
                   context: schemas.CurrentContext = Depends(OR_context)):

@@ -79,6 +86,8 @@ def assign_session(projectId: int, sessionId: int, issueId: str,
    }


@app.post('/{projectId}/sessions/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
@app.put('/{projectId}/sessions/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
@app.post('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
@app.put('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schemas.CommentAssignmentSchema = Body(...),

@@ -387,7 +396,7 @@ def delete_sumologic(projectId: int, context: schemas.CurrentContext = Depends(O
def get_integration_status(context: schemas.CurrentContext = Depends(OR_context)):
    error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id,
                                                              user_id=context.user_id)
    if error is not None:
    if error is not None and integration is None:
        return {"data": {}}
    return {"data": integration.get_obfuscated()}


@@ -399,7 +408,7 @@ def add_edit_jira_cloud(data: schemas.JiraGithubSchema = Body(...),
    error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER,
                                                              tenant_id=context.tenant_id,
                                                              user_id=context.user_id)
    if error is not None:
    if error is not None and integration is None:
        return error
    data.provider = integration_jira_cloud.PROVIDER
    return {"data": integration.add_edit(data=data.dict())}

@@ -422,7 +431,7 @@ def add_edit_github(data: schemas.JiraGithubSchema = Body(...),
def delete_default_issue_tracking_tool(context: schemas.CurrentContext = Depends(OR_context)):
    error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id,
                                                              user_id=context.user_id)
    if error is not None:
    if error is not None and integration is None:
        return error
    return {"data": integration.delete()}


@@ -825,6 +834,21 @@ def sessions_live(projectId: int, userId: str = None, context: schemas.CurrentCo
    return {'data': data}


@app.get('/{projectId}/assist/sessions/{sessionId}', tags=["assist"])
def get_live_session(projectId: int, sessionId: str, background_tasks: BackgroundTasks,
                     context: schemas.CurrentContext = Depends(OR_context)):
    data = assist.get_live_session_by_id(project_id=projectId, session_id=sessionId)
    if data is None:
        data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,
                                      user_id=context.user_id, include_fav_viewed=True, group_metadata=True, live=False)
        if data is None:
            return {"errors": ["session not found"]}
        if data.get("inDB"):
            background_tasks.add_task(sessions_favorite_viewed.view_session, project_id=projectId,
                                      user_id=context.user_id, session_id=sessionId)
    return {'data': data}


@app.post('/{projectId}/heatmaps/url', tags=["heatmaps"])
def get_heatmaps_by_url(projectId: int, data: schemas.GetHeatmapPayloadSchema = Body(...),
                        context: schemas.CurrentContext = Depends(OR_context)):

@@ -889,12 +913,14 @@ def errors_stats(projectId: int, startTimestamp: int, endTimestamp: int,


@app.get('/{projectId}/errors/{errorId}', tags=['errors'])
def errors_get_details(projectId: int, errorId: str, density24: int = 24, density30: int = 30,
def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
                       density30: int = 30,
                       context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
                              **{"density24": density24, "density30": density30})
    if data.get("data") is not None:
        errors_favorite_viewed.viewed_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
        background_tasks.add_task(errors_favorite_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
                                  error_id=errorId)
    return data


@@ -1065,78 +1091,6 @@ def change_client_password(data: schemas.EditUserPasswordSchema = Body(...),
                           user_id=context.user_id)


@app.post('/{projectId}/custom_metrics/try', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/try', tags=["customMetrics"])
def try_custom_metric(projectId: int, data: schemas.CreateCustomMetricsSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": custom_metrics.merged_live(project_id=projectId, data=data)}


@app.post('/{projectId}/custom_metrics', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics', tags=["customMetrics"])
def add_custom_metric(projectId: int, data: schemas.CreateCustomMetricsSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
    return custom_metrics.create(project_id=projectId, user_id=context.user_id, data=data)


@app.get('/{projectId}/custom_metrics', tags=["customMetrics"])
def get_custom_metrics(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)}


@app.get('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def get_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.get(project_id=projectId, user_id=context.user_id, metric_id=metric_id)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/custom_metrics/{metric_id}/sessions', tags=["customMetrics"])
def get_custom_metric_sessions(projectId: int, metric_id: int,
                               data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
                               context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.get_sessions(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/custom_metrics/{metric_id}/chart', tags=["customMetrics"])
def get_custom_metric_chart(projectId: int, metric_id: int, data: schemas.CustomMetricChartPayloadSchema = Body(...),
                            context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.make_chart(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
                                     data=data)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def update_custom_metric(projectId: int, metric_id: int, data: schemas.UpdateCustomMetricsSchema = Body(...),
                         context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.update(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/custom_metrics/{metric_id}/status', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/{metric_id}/status', tags=["customMetrics"])
def update_custom_metric_state(projectId: int, metric_id: int,
                               data: schemas.UpdateCustomMetricsStatusSchema = Body(...),
                               context: schemas.CurrentContext = Depends(OR_context)):
    return {
        "data": custom_metrics.change_state(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
                                            status=data.active)}


@app.delete('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def delete_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": custom_metrics.delete(project_id=projectId, user_id=context.user_id, metric_id=metric_id)}


@app.post('/{projectId}/saved_search', tags=["savedSearch"])
@app.put('/{projectId}/saved_search', tags=["savedSearch"])
def add_saved_search(projectId: int, data: schemas.SavedSearchSchema = Body(...),

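The pattern used above, moving the `view_session`/`viewed_error` bookkeeping into `background_tasks.add_task`, makes FastAPI send the response first and run the side effect afterwards. A minimal, self-contained sketch of the mechanism follows; the endpoint and task names are invented for illustration and are not part of this diff.

```python
from fastapi import BackgroundTasks, FastAPI

app = FastAPI()


def mark_viewed(item_id: int) -> None:
    # Runs after the response has been sent; a slow or failing call here
    # no longer delays the client.
    print(f"marking {item_id} as viewed")


@app.get("/items/{item_id}")
def read_item(item_id: int, background_tasks: BackgroundTasks):
    background_tasks.add_task(mark_viewed, item_id=item_id)
    return {"data": {"id": item_id}}
```
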
@@ -325,22 +325,73 @@ def get_dashboard_resources_count_by_type(projectId: int, data: schemas.MetricPa
@app.post('/{projectId}/dashboard/overview', tags=["dashboard", "metrics"])
@app.get('/{projectId}/dashboard/overview', tags=["dashboard", "metrics"])
def get_dashboard_group(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
    return {"data": [
        *helper.explode_widget(key="count_sessions",
                               data=dashboard.get_processed_sessions(project_id=projectId, **data.dict())),
    results = [
        {"key": "count_sessions",
         "data": dashboard.get_processed_sessions(project_id=projectId, **data.dict())},
        *helper.explode_widget(data={**dashboard.get_application_activity(project_id=projectId, **data.dict()),
                                     "chart": dashboard.get_performance(project_id=projectId, **data.dict())
                                     .get("chart", [])}),
        *helper.explode_widget(data=dashboard.get_page_metrics(project_id=projectId, **data.dict())),
        *helper.explode_widget(data=dashboard.get_user_activity(project_id=projectId, **data.dict())),
        *helper.explode_widget(data=dashboard.get_pages_dom_build_time(project_id=projectId, **data.dict()),
                               key="avg_pages_dom_buildtime"),
        *helper.explode_widget(data=dashboard.get_pages_response_time(project_id=projectId, **data.dict()),
                               key="avg_pages_response_time"),
        {"key": "avg_pages_dom_buildtime",
         "data": dashboard.get_pages_dom_build_time(project_id=projectId, **data.dict())},
        {"key": "avg_pages_response_time",
         "data": dashboard.get_pages_response_time(project_id=projectId, **data.dict())
         },
        *helper.explode_widget(dashboard.get_top_metrics(project_id=projectId, **data.dict())),
        *helper.explode_widget(data=dashboard.get_time_to_render(project_id=projectId, **data.dict()),
                               key="avg_time_to_render"),
        *helper.explode_widget(dashboard.get_memory_consumption(project_id=projectId, **data.dict())),
        *helper.explode_widget(dashboard.get_avg_cpu(project_id=projectId, **data.dict())),
        *helper.explode_widget(dashboard.get_avg_fps(project_id=projectId, **data.dict())),
    ]}
        {"key": "avg_time_to_render", "data": dashboard.get_time_to_render(project_id=projectId, **data.dict())},
        {"key": "avg_used_js_heap_size", "data": dashboard.get_memory_consumption(project_id=projectId, **data.dict())},
        {"key": "avg_cpu", "data": dashboard.get_avg_cpu(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_fps, "data": dashboard.get_avg_fps(project_id=projectId, **data.dict())}
    ]
    results = sorted(results, key=lambda r: r["key"])
    return {"data": results}


@app.post('/{projectId}/dashboard/overview2', tags=["dashboard", "metrics"])
@app.get('/{projectId}/dashboard/overview2', tags=["dashboard", "metrics"])
def get_dashboard_group(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
    results = [
        {"key": schemas.TemplatePredefinedKeys.count_sessions,
         "data": dashboard.get_processed_sessions(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_image_load_time,
         "data": dashboard.get_application_activity_avg_image_load_time(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_page_load_time,
         "data": dashboard.get_application_activity_avg_page_load_time(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_request_load_time,
         "data": dashboard.get_application_activity_avg_request_load_time(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_dom_content_load_start,
         "data": dashboard.get_page_metrics_avg_dom_content_load_start(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_first_contentful_pixel,
         "data": dashboard.get_page_metrics_avg_first_contentful_pixel(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_visited_pages,
         "data": dashboard.get_user_activity_avg_visited_pages(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_session_duration,
         "data": dashboard.get_user_activity_avg_session_duration(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_pages_dom_buildtime,
         "data": dashboard.get_pages_dom_build_time(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_pages_response_time,
         "data": dashboard.get_pages_response_time(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_response_time,
         "data": dashboard.get_top_metrics_avg_response_time(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_first_paint,
         "data": dashboard.get_top_metrics_avg_first_paint(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_dom_content_loaded,
         "data": dashboard.get_top_metrics_avg_dom_content_loaded(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_till_first_bit,
         "data": dashboard.get_top_metrics_avg_till_first_bit(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_time_to_interactive,
         "data": dashboard.get_top_metrics_avg_time_to_interactive(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.count_requests,
         "data": dashboard.get_top_metrics_count_requests(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_time_to_render,
         "data": dashboard.get_time_to_render(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_used_js_heap_size,
         "data": dashboard.get_memory_consumption(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_cpu,
         "data": dashboard.get_avg_cpu(project_id=projectId, **data.dict())},
        {"key": schemas.TemplatePredefinedKeys.avg_fps,
         "data": dashboard.get_avg_fps(project_id=projectId, **data.dict())}
    ]
    results = sorted(results, key=lambda r: r["key"])
    return {"data": results}

181 api/routers/subs/metrics.py Normal file

@@ -0,0 +1,181 @@
from fastapi import Body, Depends

import schemas
from chalicelib.core import dashboards2, custom_metrics
from or_dependencies import OR_context
from routers.base import get_routers

public_app, app, app_apikey = get_routers()


@app.post('/{projectId}/dashboards', tags=["dashboard"])
@app.put('/{projectId}/dashboards', tags=["dashboard"])
def create_dashboards(projectId: int, data: schemas.CreateDashboardSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
    return dashboards2.create_dashboard(project_id=projectId, user_id=context.user_id, data=data)


@app.get('/{projectId}/dashboards', tags=["dashboard"])
def get_dashboards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": dashboards2.get_dashboards(project_id=projectId, user_id=context.user_id)}


@app.get('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
def get_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
    data = dashboards2.get_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)
    if data is None:
        return {"errors": ["dashboard not found"]}
    return {"data": data}


@app.post('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
@app.put('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
def update_dashboard(projectId: int, dashboardId: int, data: schemas.EditDashboardSchema = Body(...),
                     context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": dashboards2.update_dashboard(project_id=projectId, user_id=context.user_id,
                                                 dashboard_id=dashboardId, data=data)}


@app.delete('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
def delete_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
    return dashboards2.delete_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)


@app.get('/{projectId}/dashboards/{dashboardId}/pin', tags=["dashboard"])
def pin_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": dashboards2.pin_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)}


@app.post('/{projectId}/dashboards/{dashboardId}/widgets', tags=["dashboard"])
@app.put('/{projectId}/dashboards/{dashboardId}/widgets', tags=["dashboard"])
def add_widget_to_dashboard(projectId: int, dashboardId: int,
                            data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
                            context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": dashboards2.add_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
                                           data=data)}


@app.post('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
@app.put('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int,
                                       data: schemas.CreateCustomMetricsSchema = Body(...),
                                       context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": dashboards2.create_metric_add_widget(project_id=projectId, user_id=context.user_id,
                                                         dashboard_id=dashboardId, data=data)}


@app.post('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
@app.put('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
def update_widget_in_dashboard(projectId: int, dashboardId: int, widgetId: int,
                               data: schemas.UpdateWidgetPayloadSchema = Body(...),
                               context: schemas.CurrentContext = Depends(OR_context)):
    return dashboards2.update_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
                                     widget_id=widgetId, data=data)


@app.delete('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
def remove_widget_from_dashboard(projectId: int, dashboardId: int, widgetId: int,
                                 context: schemas.CurrentContext = Depends(OR_context)):
    return dashboards2.remove_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
                                     widget_id=widgetId)


@app.post('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}/chart', tags=["dashboard"])
def get_widget_chart(projectId: int, dashboardId: int, widgetId: int,
                     data: schemas.CustomMetricChartPayloadSchema = Body(...),
                     context: schemas.CurrentContext = Depends(OR_context)):
    data = dashboards2.make_chart_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
                                         widget_id=widgetId, data=data)
    if data is None:
        return {"errors": ["widget not found"]}
    return {"data": data}


@app.get('/{projectId}/metrics/templates', tags=["dashboard"])
def get_templates(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": dashboards2.get_templates(project_id=projectId, user_id=context.user_id)}


@app.post('/{projectId}/metrics/try', tags=["dashboard"])
@app.put('/{projectId}/metrics/try', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/try', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/try', tags=["customMetrics"])
def try_custom_metric(projectId: int, data: schemas.TryCustomMetricsPayloadSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": custom_metrics.merged_live(project_id=projectId, data=data)}


@app.post('/{projectId}/metrics', tags=["dashboard"])
@app.put('/{projectId}/metrics', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics', tags=["customMetrics"])
def add_custom_metric(projectId: int, data: schemas.CreateCustomMetricsSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
    return custom_metrics.create(project_id=projectId, user_id=context.user_id, data=data)


@app.get('/{projectId}/metrics', tags=["dashboard"])
@app.get('/{projectId}/custom_metrics', tags=["customMetrics"])
def get_custom_metrics(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)}


@app.get('/{projectId}/metrics/{metric_id}', tags=["dashboard"])
@app.get('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def get_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.get(project_id=projectId, user_id=context.user_id, metric_id=metric_id)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/metrics/{metric_id}/sessions', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/sessions', tags=["customMetrics"])
def get_custom_metric_sessions(projectId: int, metric_id: int,
                               data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
                               context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.get_sessions(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/metrics/{metric_id}/chart', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/chart', tags=["customMetrics"])
def get_custom_metric_chart(projectId: int, metric_id: int, data: schemas.CustomMetricChartPayloadSchema = Body(...),
                            context: schemas.CurrentContext = Depends(OR_context)):
    data = dashboards2.make_chart_metrics(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
                                          data=data)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/metrics/{metric_id}', tags=["dashboard"])
@app.put('/{projectId}/metrics/{metric_id}', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def update_custom_metric(projectId: int, metric_id: int, data: schemas.UpdateCustomMetricsSchema = Body(...),
                         context: schemas.CurrentContext = Depends(OR_context)):
    data = custom_metrics.update(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)
    if data is None:
        return {"errors": ["custom metric not found"]}
    return {"data": data}


@app.post('/{projectId}/metrics/{metric_id}/status', tags=["dashboard"])
@app.put('/{projectId}/metrics/{metric_id}/status', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/status', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/{metric_id}/status', tags=["customMetrics"])
def update_custom_metric_state(projectId: int, metric_id: int,
                               data: schemas.UpdateCustomMetricsStatusSchema = Body(...),
                               context: schemas.CurrentContext = Depends(OR_context)):
    return {
        "data": custom_metrics.change_state(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
                                            status=data.active)}


@app.delete('/{projectId}/metrics/{metric_id}', tags=["dashboard"])
@app.delete('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def delete_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": custom_metrics.delete(project_id=projectId, user_id=context.user_id, metric_id=metric_id)}

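To see how the new dashboard routes are meant to be driven, here is a hedged client-side sketch (not part of this commit). The base URL, token, project id, and metric ids are placeholders; the payload fields follow the `CreateDashboardSchema` defined further down in this diff, and the response shape is not assumed.

```python
import requests

BASE_URL = "http://localhost:8080/api"       # placeholder
HEADERS = {"Authorization": "Bearer <JWT>"}  # placeholder
PROJECT_ID = 1                               # placeholder

# Create a dashboard that pre-attaches two existing metrics by id.
resp = requests.post(f"{BASE_URL}/{PROJECT_ID}/dashboards", headers=HEADERS,
                     json={"name": "Web vitals", "isPublic": True, "metrics": [12, 13]})
print(resp.status_code, resp.json())
```
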
111 api/schemas.py

@@ -776,6 +776,7 @@ class CustomMetricCreateSeriesSchema(BaseModel):
class MetricTimeseriesViewType(str, Enum):
    line_chart = "lineChart"
    progress = "progress"
    area_chart = "areaChart"


class MetricTableViewType(str, Enum):

@@ -803,8 +804,8 @@ class TimeseriesMetricOfType(str, Enum):


class CustomMetricSessionsPayloadSchema(FlatSessionsSearch):
    startDate: int = Field(TimeUTC.now(-7))
    endDate: int = Field(TimeUTC.now())
    startTimestamp: int = Field(TimeUTC.now(-7))
    endTimestamp: int = Field(TimeUTC.now())

    class Config:
        alias_generator = attribute_to_camel_case

@@ -817,10 +818,10 @@ class CustomMetricChartPayloadSchema(CustomMetricSessionsPayloadSchema):
        alias_generator = attribute_to_camel_case


class CreateCustomMetricsSchema(CustomMetricChartPayloadSchema):
class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):
    name: str = Field(...)
    series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)
    is_public: bool = Field(default=True, const=True)
    series: List[CustomMetricCreateSeriesSchema] = Field(...)
    is_public: bool = Field(default=True)
    view_type: Union[MetricTimeseriesViewType, MetricTableViewType] = Field(MetricTimeseriesViewType.line_chart)
    metric_type: MetricType = Field(MetricType.timeseries)
    metric_of: Union[TableMetricOfType, TimeseriesMetricOfType] = Field(TableMetricOfType.user_id)

@@ -858,6 +859,10 @@ class CreateCustomMetricsSchema(CustomMetricChartPayloadSchema):
        alias_generator = attribute_to_camel_case


class CreateCustomMetricsSchema(TryCustomMetricsPayloadSchema):
    series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)


class CustomMetricUpdateSeriesSchema(CustomMetricCreateSeriesSchema):
    series_id: Optional[int] = Field(None)

@@ -875,3 +880,99 @@ class UpdateCustomMetricsStatusSchema(BaseModel):

class SavedSearchSchema(FunnelSchema):
    filter: FlatSessionsSearchPayloadSchema = Field([])


class CreateDashboardSchema(BaseModel):
    name: str = Field(..., min_length=1)
    is_public: bool = Field(default=False)
    is_pinned: bool = Field(default=False)
    metrics: Optional[List[int]] = Field(default=[])

    class Config:
        alias_generator = attribute_to_camel_case


class EditDashboardSchema(CreateDashboardSchema):
    is_public: Optional[bool] = Field(default=None)
    is_pinned: Optional[bool] = Field(default=None)


class UpdateWidgetPayloadSchema(BaseModel):
    config: dict = Field(default={})

    class Config:
        alias_generator = attribute_to_camel_case


class AddWidgetToDashboardPayloadSchema(UpdateWidgetPayloadSchema):
    metric_id: int = Field(...)

    class Config:
        alias_generator = attribute_to_camel_case


# these values should match the keys in metrics table
class TemplatePredefinedKeys(str, Enum):
    count_sessions = "count_sessions"
    avg_request_load_time = "avg_request_load_time"
    avg_page_load_time = "avg_page_load_time"
    avg_image_load_time = "avg_image_load_time"
    avg_dom_content_load_start = "avg_dom_content_load_start"
    avg_first_contentful_pixel = "avg_first_contentful_pixel"
    avg_visited_pages = "avg_visited_pages"
    avg_session_duration = "avg_session_duration"
    avg_pages_dom_buildtime = "avg_pages_dom_buildtime"
    avg_pages_response_time = "avg_pages_response_time"
    avg_response_time = "avg_response_time"
    avg_first_paint = "avg_first_paint"
    avg_dom_content_loaded = "avg_dom_content_loaded"
    avg_till_first_bit = "avg_till_first_byte"
    avg_time_to_interactive = "avg_time_to_interactive"
    count_requests = "count_requests"
    avg_time_to_render = "avg_time_to_render"
    avg_used_js_heap_size = "avg_used_js_heap_size"
    avg_cpu = "avg_cpu"
    avg_fps = "avg_fps"
    impacted_sessions_by_js_errors = "impacted_sessions_by_js_errors"
    domains_errors_4xx = "domains_errors_4xx"
    domains_errors_5xx = "domains_errors_5xx"
    errors_per_domains = "errors_per_domains"
    calls_errors = "calls_errors"
    errors_by_type = "errors_per_type"
    errors_by_origin = "resources_by_party"
    speed_index_by_location = "speed_location"
    slowest_domains = "slowest_domains"
    sessions_per_browser = "sessions_per_browser"
    time_to_render = "time_to_render"
    impacted_sessions_by_slow_pages = "impacted_sessions_by_slow_pages"
    memory_consumption = "memory_consumption"
    cpu_load = "cpu"
    frame_rate = "fps"
    crashes = "crashes"
    resources_vs_visually_complete = "resources_vs_visually_complete"
    pages_dom_buildtime = "pages_dom_buildtime"
    pages_response_time = "pages_response_time"
    pages_response_time_distribution = "pages_response_time_distribution"
    missing_resources = "missing_resources"
    slowest_resources = "slowest_resources"
    resources_fetch_time = "resources_loading_time"
    resource_type_vs_response_end = "resource_type_vs_response_end"
    resources_count_by_type = "resources_count_by_type"


class TemplatePredefinedUnits(str, Enum):
    millisecond = "ms"
    minute = "min"
    memory = "mb"
    frame = "f/s"
    percentage = "%"
    count = "count"


class CustomMetricAndTemplate(BaseModel):
    is_template: bool = Field(...)
    project_id: Optional[int] = Field(...)
    predefined_key: Optional[TemplatePredefinedKeys] = Field(...)

    class Config:
        alias_generator = attribute_to_camel_case

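The split above means the `/metrics/try` payload accepts an empty series while the create/update path still requires at least one. A small pydantic 1.8.x sketch of the same `min_items` behaviour, with hypothetical model names that are not part of this diff:

```python
from typing import List

from pydantic import BaseModel, Field, ValidationError


class TrySchema(BaseModel):
    series: List[int] = Field(...)  # any length, including empty


class CreateSchema(TrySchema):
    series: List[int] = Field(..., min_items=1)  # at least one item required


print(TrySchema(series=[]))       # accepted
try:
    CreateSchema(series=[])
except ValidationError as e:
    print(e.errors()[0]["type"])  # value_error.list.min_items
```
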
@@ -1,4 +1,4 @@
FROM golang:1.13-alpine3.10 AS prepare
FROM golang:1.18-alpine3.15 AS prepare

RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash

@@ -27,6 +27,7 @@ ENV TZ=UTC \
    HTTP_PORT=80 \
    BEACON_SIZE_LIMIT=7000000 \
    KAFKA_USE_SSL=true \
    KAFKA_MAX_POLL_INTERVAL_MS=400000 \
    REDIS_STREAMS_MAX_LEN=3000 \
    TOPIC_RAW_WEB=raw \
    TOPIC_RAW_IOS=raw-ios \

@@ -1,14 +1,12 @@
module openreplay/backend

go 1.13
go 1.18

require (
    cloud.google.com/go/logging v1.4.2
    github.com/ClickHouse/clickhouse-go v1.4.3
    github.com/Masterminds/squirrel v1.5.0
    github.com/aws/aws-sdk-go v1.35.23
    github.com/btcsuite/btcutil v1.0.2
    github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect
    github.com/elastic/go-elasticsearch/v7 v7.13.1
    github.com/go-redis/redis v6.15.9+incompatible
    github.com/google/uuid v1.1.2

@@ -16,14 +14,47 @@ require (
    github.com/jackc/pgconn v1.6.0
    github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451
    github.com/jackc/pgx/v4 v4.6.0
    github.com/klauspost/compress v1.11.9 // indirect
    github.com/klauspost/pgzip v1.2.5
    github.com/lib/pq v1.2.0
    github.com/oschwald/maxminddb-golang v1.7.0
    github.com/pkg/errors v0.9.1
    github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
    github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
    golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420
    google.golang.org/api v0.50.0
    gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0

)

require (
    cloud.google.com/go v0.84.0 // indirect
    github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
    github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect
    github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/go-cmp v0.5.6 // indirect
    github.com/googleapis/gax-go/v2 v2.0.5 // indirect
    github.com/jackc/chunkreader/v2 v2.0.1 // indirect
    github.com/jackc/pgio v1.0.0 // indirect
    github.com/jackc/pgpassfile v1.0.0 // indirect
    github.com/jackc/pgproto3/v2 v2.0.2 // indirect
    github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect
    github.com/jackc/pgtype v1.3.0 // indirect
    github.com/jackc/puddle v1.1.0 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/jstemmer/go-junit-report v0.9.1 // indirect
    github.com/klauspost/compress v1.11.9 // indirect
    go.opencensus.io v0.23.0 // indirect
    golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
    golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
    golang.org/x/mod v0.4.2 // indirect
    golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect
    golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
    golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
    golang.org/x/text v0.3.6 // indirect
    golang.org/x/tools v0.1.4 // indirect
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 // indirect
    google.golang.org/grpc v1.38.0 // indirect
    google.golang.org/protobuf v1.26.0 // indirect
    gopkg.in/yaml.v2 v2.2.8 // indirect
)

@ -46,8 +46,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
|
|||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
|
||||
github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8=
|
||||
github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/aws/aws-sdk-go v1.35.23 h1:SCP0d0XvyJTDmfnHEQPvBaYi3kea1VNUo7uQmkVgFts=
|
||||
github.com/aws/aws-sdk-go v1.35.23/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
|
||||
|
|
@ -75,8 +73,8 @@ github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnht
|
|||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/confluentinc/confluent-kafka-go v1.5.2 h1:l+qt+a0Okmq0Bdr1P55IX4fiwFJyg0lZQmfHkAFkv7E=
|
||||
github.com/confluentinc/confluent-kafka-go v1.5.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
|
||||
github.com/confluentinc/confluent-kafka-go v1.7.0 h1:tXh3LWb2Ne0WiU3ng4h5qiGA9XV61rz46w60O+cq8bM=
|
||||
github.com/confluentinc/confluent-kafka-go v1.7.0/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
|
|
@ -93,7 +91,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
|
|||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
|
@ -135,7 +132,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
|
|||
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
|
|
@ -152,11 +148,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
|
||||
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
|
|
@ -184,7 +178,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
|||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
|
||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
||||
|
|
@ -203,7 +196,6 @@ github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye47
|
|||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
|
||||
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
||||
|
|
@ -219,7 +211,6 @@ github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCM
|
|||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||
github.com/jackc/pgtype v1.3.0 h1:l8JvKrby3RI7Kg3bYEeU9TA4vqC38QDpFCfcrC7KuN0=
|
||||
github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
|
||||
github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
|
||||
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||
|
|
@ -254,10 +245,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
|
||||
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
|
||||
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
|
||||
|
|
@ -682,8 +669,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
|
|||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.5.2 h1:g0WBLy6fobNUU8W/e9zx6I0Yl79Ya+BDW1NwzAlTiiQ=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.5.2/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 h1:+RlmciBLDd/XwM1iudiG3HtCg45purnsOxEoY/+JZdQ=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
|
|
|||
|
|
@@ -28,30 +28,6 @@ func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
    return c.Conn.InsertIssueEvent(sessionID, session.ProjectID, crash)
}

func (c *PGCache) InsertUserID(sessionID uint64, userID *IOSUserID) error {
    if err := c.Conn.InsertIOSUserID(sessionID, userID); err != nil {
        return err
    }
    session, err := c.GetSession(sessionID)
    if err != nil {
        return err
    }
    session.UserID = &userID.Value
    return nil
}

func (c *PGCache) InsertUserAnonymousID(sessionID uint64, userAnonymousID *IOSUserAnonymousID) error {
    if err := c.Conn.InsertIOSUserAnonymousID(sessionID, userAnonymousID); err != nil {
        return err
    }
    session, err := c.GetSession(sessionID)
    if err != nil {
        return err
    }
    session.UserAnonymousID = &userAnonymousID.Value
    return nil
}

func (c *PGCache) InsertMetadata(sessionID uint64, metadata *Metadata) error {
    session, err := c.GetSession(sessionID)
    if err != nil {

@@ -1,228 +0,0 @@
package postgres

import (
    "database/sql"
    "errors"
    "fmt"
    sq "github.com/Masterminds/squirrel"
    "log"
    "strconv"
    "time"
)

type TimeString sql.NullString
type query struct {
    Left     string  `db:"query.left" json:"left"`
    Operator string  `db:"query.operator" json:"operator"`
    Right    float64 `db:"query.right" json:"right"`
}
type options struct {
    RenotifyInterval int64               `db:"options.renotifyInterval" json:"renotifyInterval"`
    LastNotification int64               `db:"options.lastNotification" json:"lastNotification;omitempty"`
    CurrentPeriod    int64               `db:"options.currentPeriod" json:"currentPeriod"`
    PreviousPeriod   int64               `db:"options.previousPeriod" json:"previousPeriod;omitempty"`
    Message          []map[string]string `db:"options.message" json:"message;omitempty"`
    Change           string              `db:"options.change" json:"change;omitempty"`
}
type Alert struct {
    AlertID         uint32         `db:"alert_id" json:"alert_id"`
    ProjectID       uint32         `db:"project_id" json:"project_id"`
    Name            string         `db:"name" json:"name"`
    Description     sql.NullString `db:"description" json:"description"`
    Active          bool           `db:"active" json:"active"`
    DetectionMethod string         `db:"detection_method" json:"detection_method"`
    Query           query          `db:"query" json:"query"`
    DeletedAt       *int64         `db:"deleted_at" json:"deleted_at"`
    CreatedAt       *int64         `db:"created_at" json:"created_at"`
    Options         options        `db:"options" json:"options"`
    TenantId        uint32         `db:"tenant_id" json:"tenant_id"`
}

func (pg *Conn) IterateAlerts(iter func(alert *Alert, err error)) error {
    rows, err := pg.query(`
        SELECT
            alerts.alert_id,
            alerts.project_id,
            alerts.name,
            alerts.description,
            alerts.active,
            alerts.detection_method,
            alerts.query,
            CAST(EXTRACT(epoch FROM alerts.deleted_at) * 1000 AS BIGINT) AS deleted_at,
            CAST(EXTRACT(epoch FROM alerts.created_at) * 1000 AS BIGINT) AS created_at,
            alerts.options,
            0 AS tenant_id
        FROM public.alerts
        WHERE alerts.active AND alerts.deleted_at ISNULL;
    `)
    if err != nil {
        return err
    }
    defer rows.Close()
    for rows.Next() {
        a := new(Alert)
        if err = rows.Scan(
            &a.AlertID,
            &a.ProjectID,
            &a.Name,
            &a.Description,
            &a.Active,
            &a.DetectionMethod,
            &a.Query,
            &a.DeletedAt,
            &a.CreatedAt,
            &a.Options,
            &a.TenantId,
        ); err != nil {
            iter(nil, err)
            continue
        }
        iter(a, nil)
    }

    if err = rows.Err(); err != nil {
        return err
    }
    return nil
}

func (pg *Conn) SaveLastNotification(allIds []uint32) error {
    var paramrefs string
    for _, v := range allIds {
        paramrefs += strconv.Itoa(int(v)) + `,`
    }
    paramrefs = paramrefs[:len(paramrefs)-1] // remove last ","
    q := "UPDATE public.Alerts SET options = options||'{\"lastNotification\":" + strconv.Itoa(int(time.Now().Unix()*1000)) + "}'::jsonb WHERE alert_id IN (" + paramrefs + ");"
    //log.Println(q)
    log.Println("Updating PG")
    return pg.exec(q)
}

type columnDefinition struct {
    table     string
    formula   string
    condition string
    group     string
}

var LeftToDb = map[string]columnDefinition{
    "performance.dom_content_loaded.average":      {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
    "performance.first_meaningful_paint.average":  {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
"performance.page_load_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(load_time ,0))"},
|
||||
"performance.dom_build_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(dom_building_time,0))"},
|
||||
"performance.speed_index.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(speed_index,0))"},
|
||||
"performance.page_response_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(response_time,0))"},
|
||||
"performance.ttfb.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(first_paint_time,0))"},
|
||||
"performance.time_to_render.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(visually_complete,0))"},
|
||||
"performance.image_load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='img'"},
|
||||
"performance.request_load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='fetch'"},
|
||||
"resources.load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))"},
|
||||
"resources.missing.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT url_hostpath)", condition: "success= FALSE"},
|
||||
"errors.4xx_5xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100!=2"},
|
||||
"errors.4xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=4"},
|
||||
"errors.5xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=5"},
|
||||
"errors.javascript.impacted_sessions.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT session_id)", condition: "success= FALSE AND type='script'"},
|
||||
"performance.crashes.count": {table: "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions", formula: "COUNT(DISTINCT session_id)", condition: "errors_count > 0"},
|
||||
"errors.javascript.count": {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source='js_exception'"},
|
||||
"errors.backend.count": {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source!='js_exception'"},
|
||||
}
|
||||
|
||||
//This is the frequency of execution for each threshold
|
||||
var TimeInterval = map[int64]int64{
|
||||
15: 3,
|
||||
30: 5,
|
||||
60: 10,
|
||||
120: 20,
|
||||
240: 30,
|
||||
1440: 60,
|
||||
}
|
||||
|
||||
func (a *Alert) CanCheck() bool {
|
||||
now := time.Now().Unix() * 1000
|
||||
var repetitionBase int64
|
||||
|
||||
if repetitionBase = a.Options.CurrentPeriod; a.DetectionMethod == "change" && a.Options.CurrentPeriod > a.Options.PreviousPeriod {
|
||||
repetitionBase = a.Options.PreviousPeriod
|
||||
}
|
||||
|
||||
if _, ok := TimeInterval[repetitionBase]; !ok {
|
||||
log.Printf("repetitionBase: %d NOT FOUND", repetitionBase)
|
||||
return false
|
||||
}
|
||||
return a.DeletedAt == nil && a.Active &&
|
||||
(a.Options.RenotifyInterval <= 0 ||
|
||||
a.Options.LastNotification <= 0 ||
|
||||
((now - a.Options.LastNotification) > a.Options.RenotifyInterval*60*1000)) &&
|
||||
((now-*a.CreatedAt)%(TimeInterval[repetitionBase]*60*1000)) < 60*1000
|
||||
}
|
||||
|
||||
func (a *Alert) Build() (sq.SelectBuilder, error) {
|
||||
colDef, ok := LeftToDb[a.Query.Left]
|
||||
if !ok {
|
||||
return sq.Select(), errors.New(fmt.Sprintf("!! unsupported metric '%s' from alert: %d:%s\n", a.Query.Left, a.AlertID, a.Name))
|
||||
}
|
||||
|
||||
subQ := sq.
|
||||
Select(colDef.formula + " AS value").
|
||||
From(colDef.table).
|
||||
Where(sq.And{sq.Expr("project_id = $1 ", a.ProjectID),
|
||||
sq.Expr(colDef.condition)})
|
||||
q := sq.Select(fmt.Sprint("value, coalesce(value,0)", a.Query.Operator, a.Query.Right, " AS valid"))
|
||||
if len(colDef.group) > 0 {
|
||||
subQ = subQ.Column(colDef.group + " AS group_value")
|
||||
subQ = subQ.GroupBy(colDef.group)
|
||||
q = q.Column("group_value")
|
||||
}
|
||||
|
||||
if a.DetectionMethod == "threshold" {
|
||||
q = q.FromSelect(subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)), "stat")
|
||||
} else if a.DetectionMethod == "change" {
|
||||
if a.Options.Change == "change" {
|
||||
if len(colDef.group) == 0 {
|
||||
sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
|
||||
sub2, args2, _ := subQ.Where(
|
||||
sq.And{
|
||||
sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
|
||||
sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
|
||||
}).ToSql()
|
||||
sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")-(" + sub2 + ")) AS value").ToSql()
|
||||
q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
|
||||
} else {
|
||||
subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60))
|
||||
sub2, args2, _ := subQ.Where(
|
||||
sq.And{
|
||||
sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
|
||||
sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
|
||||
}).ToSql()
|
||||
sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
|
||||
q = q.FromSelect(sub1, "stat")
|
||||
}
|
||||
} else if a.Options.Change == "percent" {
|
||||
if len(colDef.group) == 0 {
|
||||
sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
|
||||
sub2, args2, _ := subQ.Where(
|
||||
sq.And{
|
||||
sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
|
||||
sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
|
||||
}).ToSql()
|
||||
sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")/(" + sub2 + ")-1)*100 AS value").ToSql()
|
||||
q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
|
||||
} else {
|
||||
subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60))
|
||||
sub2, args2, _ := subQ.Where(
|
||||
sq.And{
|
||||
sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
|
||||
sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
|
||||
}).ToSql()
|
||||
sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
|
||||
q = q.FromSelect(sub1, "stat")
|
||||
}
|
||||
} else {
|
||||
return q, errors.New("unsupported change method")
|
||||
}
|
||||
|
||||
} else {
|
||||
return q, errors.New("unsupported detection method")
|
||||
}
|
||||
return q, nil
|
||||
}
|
||||
|
|
@ -11,7 +11,6 @@ import (
|
|||
type Listener struct {
|
||||
conn *pgx.Conn
|
||||
Integrations chan *Integration
|
||||
Alerts chan *Alert
|
||||
Errors chan error
|
||||
}
|
||||
|
||||
|
|
@ -32,23 +31,6 @@ func NewIntegrationsListener(url string) (*Listener, error) {
|
|||
return listener, nil
|
||||
}
|
||||
|
||||
func NewAlertsListener(url string) (*Listener, error) {
|
||||
conn, err := pgx.Connect(context.Background(), url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener := &Listener{
|
||||
conn: conn,
|
||||
Errors: make(chan error),
|
||||
}
|
||||
listener.Alerts = make(chan *Alert, 50)
|
||||
if _, err := conn.Exec(context.Background(), "LISTEN alert"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go listener.listen()
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
func (listener *Listener) listen() {
|
||||
for {
|
||||
notification, err := listener.conn.WaitForNotification(context.Background())
|
||||
|
|
@ -64,13 +46,6 @@ func (listener *Listener) listen() {
|
|||
} else {
|
||||
listener.Integrations <- integrationP
|
||||
}
|
||||
case "alert":
|
||||
alertP := new(Alert)
|
||||
if err := json.Unmarshal([]byte(notification.Payload), alertP); err != nil {
|
||||
listener.Errors <- fmt.Errorf("%v | Payload: %v", err, notification.Payload)
|
||||
} else {
|
||||
listener.Alerts <- alertP
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -35,7 +35,7 @@ func (conn *Conn) InsertWebStatsPerformance(sessionID uint64, p *PerformanceTrac
}

func (conn *Conn) InsertWebStatsResourceEvent(sessionID uint64, e *ResourceEvent) error {
    host, _, err := url.GetURLParts(e.URL)
    host, _, _, err := url.GetURLParts(e.URL)
    if err != nil {
        return err
    }
@@ -55,7 +55,7 @@ func (conn *Conn) InsertWebUserAnonymousID(sessionID uint64, userAnonymousID *Us

// TODO: fix column "dom_content_loaded_event_end" of relation "pages"
func (conn *Conn) InsertWebPageEvent(sessionID uint64, e *PageEvent) error {
    host, path, err := url.GetURLParts(e.URL)
    host, path, query, err := url.GetURLParts(e.URL)
    if err != nil {
        return err
    }
@ -64,20 +64,27 @@ func (conn *Conn) InsertWebPageEvent(sessionID uint64, e *PageEvent) error {
|
|||
return err
|
||||
}
|
||||
defer tx.rollback()
|
||||
// base_path is depricated
|
||||
if err := tx.exec(`
|
||||
INSERT INTO events.pages (
|
||||
session_id, message_id, timestamp, referrer, base_referrer, host, path, base_path,
|
||||
session_id, message_id, timestamp, referrer, base_referrer, host, path, query,
|
||||
dom_content_loaded_time, load_time, response_end, first_paint_time, first_contentful_paint_time,
|
||||
speed_index, visually_complete, time_to_interactive,
|
||||
response_time, dom_building_time
|
||||
response_time, dom_building_time,
|
||||
base_path
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, $5, $6, $7, $8,
|
||||
$1, $2, $3,
|
||||
$4, $5,
|
||||
$6, $7, $8,
|
||||
NULLIF($9, 0), NULLIF($10, 0), NULLIF($11, 0), NULLIF($12, 0), NULLIF($13, 0),
|
||||
NULLIF($14, 0), NULLIF($15, 0), NULLIF($16, 0),
|
||||
NULLIF($17, 0), NULLIF($18, 0)
|
||||
NULLIF($17, 0), NULLIF($18, 0),
|
||||
'',
|
||||
)
|
||||
`,
|
||||
sessionID, e.MessageID, e.Timestamp, e.Referrer, url.DiscardURLQuery(e.Referrer), host, path, url.DiscardURLQuery(path),
|
||||
sessionID, e.MessageID, e.Timestamp,
|
||||
e.Referrer, url.DiscardURLQuery(e.Referrer),
|
||||
host, path, query,
|
||||
e.DomContentLoadedEventEnd, e.LoadEventEnd, e.ResponseEnd, e.FirstPaint, e.FirstContentfulPaint,
|
||||
e.SpeedIndex, e.VisuallyComplete, e.TimeToInteractive,
|
||||
calcResponseTime(e), calcDomBuildingTime(e),
|
||||
|
|
@ -109,7 +116,7 @@ func (conn *Conn) InsertWebClickEvent(sessionID uint64, e *ClickEvent) error {
|
|||
INSERT INTO events.clicks
|
||||
(session_id, message_id, timestamp, label, selector, url)
|
||||
(SELECT
|
||||
$1, $2, $3, NULLIF($4, ''), $5, host || base_path
|
||||
$1, $2, $3, NULLIF($4, ''), $5, host || path
|
||||
FROM events.pages
|
||||
WHERE session_id = $1 AND timestamp <= $3 ORDER BY timestamp DESC LIMIT 1
|
||||
)
|
||||
|
|
@ -210,20 +217,27 @@ func (conn *Conn) InsertWebFetchEvent(sessionID uint64, savePayload bool, e *Fet
|
|||
request = &e.Request
|
||||
response = &e.Response
|
||||
}
|
||||
conn.insertAutocompleteValue(sessionID, "REQUEST", url.DiscardURLQuery(e.URL))
|
||||
host, path, query, err := url.GetURLParts(e.URL)
|
||||
conn.insertAutocompleteValue(sessionID, "REQUEST", path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return conn.batchQueue(sessionID, `
|
||||
INSERT INTO events_common.requests (
|
||||
session_id, timestamp,
|
||||
seq_index, url, duration, success,
|
||||
request_body, response_body, status_code, method
|
||||
session_id, timestamp, seq_index,
|
||||
url, host, path, query,
|
||||
request_body, response_body, status_code, method,
|
||||
duration, success,
|
||||
) VALUES (
|
||||
$1, $2,
|
||||
$3, $4, $5, $6,
|
||||
$7, $8, $9::smallint, NULLIF($10, '')::http_method
|
||||
$1, $2, $3,
|
||||
$4, $5, $6, $7
|
||||
$8, $9, $10::smallint, NULLIF($11, '')::http_method,
|
||||
$12, $13
|
||||
) ON CONFLICT DO NOTHING`,
|
||||
sessionID, e.Timestamp,
|
||||
getSqIdx(e.MessageID), e.URL, e.Duration, e.Status < 400,
|
||||
sessionID, e.Timestamp, getSqIdx(e.MessageID),
|
||||
e.URL, host, path, query,
|
||||
request, response, e.Status, url.EnsureMethod(e.Method),
|
||||
e.Duration, e.Status < 400,
|
||||
)
|
||||
|
||||
}
|
||||
13
backend/pkg/pprof/pprof.go
Normal file

@@ -0,0 +1,13 @@
package pprof

import (
    "log"
    "net/http"
    _ "net/http/pprof"
)

func StartProfilingServer() {
    go func() {
        log.Println(http.ListenAndServe("localhost:6060", nil))
    }()
}
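For illustration only (not part of this commit): a minimal sketch of how a service could start the profiler added above, assuming nothing beyond the import path backend/pkg/pprof shown in the new file.

package main

import (
    "log"

    "openreplay/backend/pkg/pprof"
)

func main() {
    // StartProfilingServer runs the net/http/pprof endpoint on localhost:6060 in a background goroutine.
    pprof.StartProfilingServer()
    log.Println("profiles available at http://localhost:6060/debug/pprof/")
    select {} // a real service would run its own loop here instead of blocking
}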
@@ -1,15 +1,14 @@
package queue

import (
    "openreplay/backend/pkg/redisstream"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/pkg/redisstream"
)

func NewConsumer(group string, topics []string, handler types.MessageHandler) types.Consumer {
func NewConsumer(group string, topics []string, handler types.MessageHandler, _ bool) types.Consumer {
    return redisstream.NewConsumer(group, topics, handler)
}

func NewProducer() types.Producer {
    return redisstream.NewProducer()
}
@@ -7,13 +7,12 @@ import (
    "openreplay/backend/pkg/queue/types"
)

func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler) types.Consumer {
func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler, autoCommit bool) types.Consumer {
    return NewConsumer(group, topics, func(sessionID uint64, value []byte, meta *types.Meta) {
        if err := messages.ReadBatch(value, func(msg messages.Message) {
            handler(sessionID, msg, meta)
        }); err != nil {
            log.Printf("Decode error: %v\n", err)
        }
    })
    }, autoCommit)
}
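For illustration only (not from the commit): a sketch of the new autoCommit parameter in use. The constructor, handler and Consumer signatures follow the hunks in this diff; the group and topic names are made up.

package main

import (
    "log"

    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/queue/types"
)

func main() {
    // autoCommit=false: the caller commits explicitly via Commit()/CommitBack(),
    // the way the ender and db services in this commit do.
    consumer := queue.NewMessageConsumer(
        "example-group",           // hypothetical group name
        []string{"example-topic"}, // hypothetical topic
        func(sessionID uint64, msg messages.Message, meta *types.Meta) {
            log.Printf("session %d: %T from %s", sessionID, msg, meta.Topic)
        },
        false,
    )
    defer consumer.Close()
    for {
        if err := consumer.ConsumeNext(); err != nil {
            log.Printf("consume error: %v", err)
        }
    }
}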
@@ -6,26 +6,22 @@ import (

type Consumer interface {
    ConsumeNext() error
    DisableAutoCommit()
    Commit() error
    CommitBack(gap int64) error
    Close()
}

type Producer interface {
    Produce(topic string, key uint64, value []byte) error
    Close(timeout int)
    Flush(timeout int)
}

type Meta struct {
    ID uint64
    Topic string
    ID        uint64
    Topic     string
    Timestamp int64
}

type MessageHandler func(uint64, []byte, *Meta)
type DecodedMessageHandler func(uint64, messages.Message, *Meta)
@ -1,24 +1,22 @@
|
|||
package redisstream
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
_redis "github.com/go-redis/redis"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
)
|
||||
|
||||
|
||||
|
||||
type idsInfo struct{
|
||||
id []string
|
||||
ts []int64
|
||||
type idsInfo struct {
|
||||
id []string
|
||||
ts []int64
|
||||
}
|
||||
type streamPendingIDsMap map[string]*idsInfo
|
||||
|
||||
|
|
@ -41,26 +39,25 @@ func NewConsumer(group string, streams []string, messageHandler types.MessageHan
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
idsPending := make(streamPendingIDsMap)
|
||||
|
||||
streamsCount := len(streams)
|
||||
for i := 0; i < streamsCount; i++ {
|
||||
// ">" is for never-delivered messages.
|
||||
// Otherwise - never acknoledged only
|
||||
// ">" is for never-delivered messages.
|
||||
// Otherwise - never acknoledged only
|
||||
// TODO: understand why in case of "0" it eats 100% cpu
|
||||
streams = append(streams, ">")
|
||||
|
||||
streams = append(streams, ">")
|
||||
|
||||
idsPending[streams[i]] = new(idsInfo)
|
||||
}
|
||||
|
||||
return &Consumer{
|
||||
redis: redis,
|
||||
redis: redis,
|
||||
messageHandler: messageHandler,
|
||||
streams: streams,
|
||||
group: group,
|
||||
autoCommit: true,
|
||||
idsPending: idsPending,
|
||||
streams: streams,
|
||||
group: group,
|
||||
autoCommit: true,
|
||||
idsPending: idsPending,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@@ -106,9 +103,9 @@ func (c *Consumer) ConsumeNext() error {
                return errors.New("Too many messages per ms in redis")
            }
            c.messageHandler(sessionID, []byte(valueString), &types.Meta{
                Topic: r.Stream,
                Topic:     r.Stream,
                Timestamp: int64(ts),
                ID: ts << 13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years
                ID:        ts<<13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years
            })
            if c.autoCommit {
                if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil {
@@ -119,7 +116,7 @@ func (c *Consumer) ConsumeNext() error {
                c.idsPending[r.Stream].id = append(c.idsPending[r.Stream].id, m.ID)
                c.idsPending[r.Stream].ts = append(c.idsPending[r.Stream].ts, int64(ts))
            }

        }
    }
    return nil
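For illustration only (not from the commit): the Meta.ID packing used in the hunk above keeps the Redis stream timestamp in the high bits and a per-millisecond index in the 13 low bits. A small sketch of packing and unpacking such an ID; the sample values are made up.

package main

import "fmt"

// packID mirrors ts<<13 | (idx & 0x1FFF) from the consumer above.
func packID(tsMillis, idx uint64) uint64 { return tsMillis<<13 | (idx & 0x1FFF) }

// unpackID recovers the timestamp and the per-millisecond index.
func unpackID(id uint64) (tsMillis, idx uint64) { return id >> 13, id & 0x1FFF }

func main() {
    id := packID(1_650_000_000_000, 42)
    ts, idx := unpackID(id)
    fmt.Println(ts, idx) // 1650000000000 42
}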
@@ -158,13 +155,9 @@ func (c *Consumer) CommitBack(gap int64) error {
        c.idsPending[stream].id = idsInfo.id[maxI:]
        c.idsPending[stream].ts = idsInfo.ts[maxI:]
    }
    return nil
}

func (c *Consumer) DisableAutoCommit() {
    //c.autoCommit = false
    return nil
}

func (c *Consumer) Close() {
    // noop
}
}
@@ -22,8 +22,8 @@ func NewTokenizer(secret string) *Tokenizer {
}

type TokenData struct {
    ID uint64
    ExpTime int64
    ID      uint64
    ExpTime int64
}

func (tokenizer *Tokenizer) sign(body string) []byte {
@@ -33,7 +33,7 @@ func (tokenizer *Tokenizer) sign(body string) []byte {
}

func (tokenizer *Tokenizer) Compose(d TokenData) string {
    body := strconv.FormatUint(d.ID, 36) +
    body := strconv.FormatUint(d.ID, 36) +
        "." + strconv.FormatInt(d.ExpTime, 36)
    sign := base58.Encode(tokenizer.sign(body))
    return body + "." + sign
@@ -58,8 +58,8 @@ func (tokenizer *Tokenizer) Parse(token string) (*TokenData, error) {
    if err != nil {
        return nil, err
    }
    if expTime <= time.Now().UnixNano()/1e6 {
        return &TokenData{id,expTime}, EXPIRED
    if expTime <= time.Now().UnixMilli() {
        return &TokenData{id, expTime}, EXPIRED
    }
    return &TokenData{id,expTime}, nil
    return &TokenData{id, expTime}, nil
}
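For illustration only (not from the commit): the token layout implied by Compose and Parse above is a base-36 ID and a base-36 expiry (now in milliseconds via UnixMilli), joined by dots, with a signature appended. A sketch of the unsigned body; the ID and lifetime are made up.

package main

import (
    "fmt"
    "strconv"
    "time"
)

func main() {
    id := uint64(12345)
    expTime := time.Now().Add(30 * time.Minute).UnixMilli()
    // The real Tokenizer signs this body (see sign above) and appends the base58-encoded signature.
    body := strconv.FormatUint(id, 36) + "." + strconv.FormatInt(expTime, 36)
    fmt.Println(body + ".<signature>")
}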
@@ -5,11 +5,18 @@ import (
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "openreplay/backend/pkg/flakeid"
)

func getSessionKey(sessionID uint64) string {
    // Based on timestamp, changes once per week. Check pkg/flakeid for understanding sessionID
    return strconv.FormatUint(sessionID>>50, 10)
    return strconv.FormatUint(
        uint64(time.UnixMilli(
            int64(flakeid.ExtractTimestamp(sessionID)),
        ).Weekday()),
        10,
    )
}

func ResolveURL(baseurl string, rawurl string) string {
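For illustration only (not from the commit): the new getSessionKey derives the key from the weekday of the timestamp embedded in the flake session ID, so the key cycles through 0-6 rather than growing over time. A sketch with the timestamp already extracted; the sample value is made up.

package main

import (
    "fmt"
    "strconv"
    "time"
)

// Mirrors the new getSessionKey once flakeid.ExtractTimestamp has produced tsMillis.
func sessionKeyFromTimestamp(tsMillis uint64) string {
    // Weekday runs from 0 (Sunday) through 6 (Saturday).
    return strconv.FormatUint(uint64(time.UnixMilli(int64(tsMillis)).Weekday()), 10)
}

func main() {
    fmt.Println(sessionKeyFromTimestamp(1_650_000_000_000))
}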
@@ -1,18 +1,19 @@
package url

import (
    "strings"
    _url "net/url"
    "strings"
)

func DiscardURLQuery(url string) string {
    return strings.Split(url, "?")[0]
}
}

func GetURLParts(rawURL string) (string, string, error) {
func GetURLParts(rawURL string) (string, string, string, error) {
    u, err := _url.Parse(rawURL)
    if err != nil {
        return "", "", err
        return "", "", "", err
    }
    return u.Host, u.RequestURI(), nil
}
    // u.Scheme ?
    return u.Host, u.RawPath, u.RawQuery, nil
}
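For illustration only (not from the commit): a standalone mirror of the widened GetURLParts signature above. One thing worth noting when reading the hunk: net/url only populates RawPath when it differs from the decoded Path, so for most URLs the second return value here comes back empty.

package main

import (
    "fmt"
    _url "net/url"
)

// Mirror of the new four-value signature from the hunk above.
func getURLParts(rawURL string) (string, string, string, error) {
    u, err := _url.Parse(rawURL)
    if err != nil {
        return "", "", "", err
    }
    // net/url sets RawPath only when it differs from the decoded Path.
    return u.Host, u.RawPath, u.RawQuery, nil
}

func main() {
    host, path, query, err := getURLParts("https://example.com/a/b?x=1")
    fmt.Println(host, path, query, err) // RawPath is empty for this URL
}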
@@ -1,11 +0,0 @@
package utime

import "time"

func CurrentTimestamp() int64 {
    return time.Now().UnixNano() / 1e6
}

func ToMilliseconds(t time.Time) int64 {
    return t.UnixNano() / 1e6
}
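For illustration only (not from the commit): the helpers deleted above are covered by the standard library since Go 1.17, which is what the rest of this commit switches to.

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.Now()
    // UnixMilli is the stdlib replacement for the old UnixNano()/1e6 helpers.
    fmt.Println(t.UnixNano()/1e6 == t.UnixMilli()) // true
}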
@ -1,31 +1,31 @@
|
|||
package cacher
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"crypto/tls"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
|
||||
"openreplay/backend/pkg/storage"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
)
|
||||
|
||||
const MAX_CACHE_DEPTH = 5
|
||||
|
||||
type cacher struct {
|
||||
timeoutMap *timeoutMap // Concurrency implemented
|
||||
s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently."
|
||||
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
|
||||
rewriter *assets.Rewriter // Read only
|
||||
Errors chan error
|
||||
sizeLimit int
|
||||
timeoutMap *timeoutMap // Concurrency implemented
|
||||
s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently."
|
||||
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
|
||||
rewriter *assets.Rewriter // Read only
|
||||
Errors chan error
|
||||
sizeLimit int
|
||||
}
|
||||
|
||||
func NewCacher(region string, bucket string, origin string, sizeLimit int) *cacher {
|
||||
|
|
@ -36,26 +36,26 @@ func NewCacher(region string, bucket string, origin string, sizeLimit int) *cach
|
|||
httpClient: &http.Client{
|
||||
Timeout: time.Duration(6) * time.Second,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
},
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
},
|
||||
},
|
||||
rewriter: rewriter,
|
||||
Errors: make(chan error),
|
||||
sizeLimit: sizeLimit,
|
||||
rewriter: rewriter,
|
||||
Errors: make(chan error),
|
||||
sizeLimit: sizeLimit,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, context string, isJS bool) {
|
||||
if c.timeoutMap.contains(requestURL) {
|
||||
return
|
||||
}
|
||||
c.timeoutMap.add(requestURL)
|
||||
var cachePath string
|
||||
if (isJS) {
|
||||
if isJS {
|
||||
cachePath = assets.GetCachePathForJS(requestURL)
|
||||
} else {
|
||||
cachePath = assets.GetCachePathForAssets(sessionID, requestURL)
|
||||
}
|
||||
if c.timeoutMap.contains(cachePath) {
|
||||
return
|
||||
}
|
||||
c.timeoutMap.add(cachePath)
|
||||
if c.s3.Exists(cachePath) {
|
||||
return
|
||||
}
|
||||
|
|
@ -94,20 +94,19 @@ func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, conte
|
|||
if isCSS {
|
||||
strData = c.rewriter.RewriteCSS(sessionID, requestURL, strData) // TODO: one method for reqrite and return list
|
||||
}
|
||||
|
||||
// TODO: implement in streams
|
||||
|
||||
// TODO: implement in streams
|
||||
err = c.s3.Upload(strings.NewReader(strData), cachePath, contentType, false)
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, context)
|
||||
return
|
||||
}
|
||||
c.timeoutMap.add(requestURL)
|
||||
|
||||
if isCSS {
|
||||
if depth > 0 {
|
||||
for _, extractedURL := range assets.ExtractURLsFromCSS(string(data)) {
|
||||
if fullURL, cachable := assets.GetFullCachableURL(requestURL, extractedURL); cachable {
|
||||
go c.cacheURL(fullURL, sessionID, depth-1, context + "\n -> " + fullURL, false)
|
||||
if fullURL, cachable := assets.GetFullCachableURL(requestURL, extractedURL); cachable {
|
||||
go c.cacheURL(fullURL, sessionID, depth-1, context+"\n -> "+fullURL, false)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -5,30 +5,30 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
const MAX_STORAGE_TIME = 18 * time.Hour
|
||||
const MAX_STORAGE_TIME = 24 * time.Hour
|
||||
|
||||
// If problem with cache contention (>=4 core) look at sync.Map
|
||||
|
||||
type timeoutMap struct {
|
||||
mx sync.RWMutex
|
||||
m map[string]time.Time
|
||||
m map[string]time.Time
|
||||
}
|
||||
|
||||
func newTimeoutMap() *timeoutMap {
|
||||
return &timeoutMap{
|
||||
m: make(map[string]time.Time),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *timeoutMap) add(key string) {
|
||||
tm.mx.Lock()
|
||||
defer tm.mx.Unlock()
|
||||
defer tm.mx.Unlock()
|
||||
tm.m[key] = time.Now()
|
||||
}
|
||||
|
||||
func (tm *timeoutMap) contains(key string) bool {
|
||||
tm.mx.RLock()
|
||||
defer tm.mx.RUnlock()
|
||||
defer tm.mx.RUnlock()
|
||||
_, ok := tm.m[key]
|
||||
return ok
|
||||
}
|
||||
|
|
@ -36,7 +36,7 @@ func (tm *timeoutMap) contains(key string) bool {
|
|||
func (tm *timeoutMap) deleteOutdated() {
|
||||
now := time.Now()
|
||||
tm.mx.Lock()
|
||||
defer tm.mx.Unlock()
|
||||
defer tm.mx.Unlock()
|
||||
for key, t := range tm.m {
|
||||
if now.Sub(t) > MAX_STORAGE_TIME {
|
||||
delete(tm.m, key)
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ import (
|
|||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
GROUP_CACHE := env.String("GROUP_CACHE")
|
||||
GROUP_CACHE := env.String("GROUP_CACHE")
|
||||
TOPIC_CACHE := env.String("TOPIC_CACHE")
|
||||
|
||||
cacher := cacher.NewCacher(
|
||||
|
|
@ -29,10 +29,10 @@ func main() {
|
|||
)
|
||||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
GROUP_CACHE,
|
||||
[]string{ TOPIC_CACHE },
|
||||
GROUP_CACHE,
|
||||
[]string{TOPIC_CACHE},
|
||||
func(sessionID uint64, message messages.Message, e *types.Meta) {
|
||||
switch msg := message.(type) {
|
||||
switch msg := message.(type) {
|
||||
case *messages.AssetCache:
|
||||
cacher.CacheURL(sessionID, msg.URL)
|
||||
case *messages.ErrorEvent:
|
||||
|
|
@ -47,17 +47,17 @@ func main() {
|
|||
for _, source := range sourceList {
|
||||
cacher.CacheJSFile(source)
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
true,
|
||||
)
|
||||
|
||||
|
||||
tick := time.Tick(20 * time.Minute)
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
log.Printf("Cacher service started\n")
|
||||
log.Printf("Cacher service started\n")
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
|
|
@ -74,4 +74,4 @@ func main() {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -74,8 +74,8 @@ func main() {
            }
        })
        },
        false,
    )
    consumer.DisableAutoCommit()

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -110,11 +110,11 @@ func (b *builder) buildInputEvent() {

func (b *builder) handleMessage(message Message, messageID uint64) {
    timestamp := GetTimestamp(message)
    if b.timestamp <= timestamp { // unnecessary? TODO: test and remove
    if b.timestamp < timestamp { // unnecessary? TODO: test and remove
        b.timestamp = timestamp
    }

    b.lastProcessedTimestamp = time.Now().UnixNano() / 1e6
    b.lastProcessedTimestamp = time.Now().UnixMilli()

    // Might happen before the first timestamp.
    switch msg := message.(type) {
@ -8,12 +8,12 @@ import (
|
|||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/intervals"
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/intervals"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/services/ender/builder"
|
||||
)
|
||||
|
||||
|
|
@ -29,24 +29,24 @@ func main() {
|
|||
|
||||
producer := queue.NewProducer()
|
||||
consumer := queue.NewMessageConsumer(
|
||||
GROUP_EVENTS,
|
||||
[]string{
|
||||
GROUP_EVENTS,
|
||||
[]string{
|
||||
env.String("TOPIC_RAW_WEB"),
|
||||
env.String("TOPIC_RAW_IOS"),
|
||||
},
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
statsLogger.HandleAndLog(sessionID, meta)
|
||||
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
|
||||
},
|
||||
false,
|
||||
)
|
||||
consumer.DisableAutoCommit()
|
||||
|
||||
|
||||
tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
log.Printf("Ender service started\n")
|
||||
log.Printf("Ender service started\n")
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
|
|
@ -55,8 +55,8 @@ func main() {
|
|||
consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP)
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <- tick:
|
||||
builderMap.IterateReadyMessages(time.Now().UnixNano()/1e6, func(sessionID uint64, readyMsg messages.Message) {
|
||||
case <-tick:
|
||||
builderMap.IterateReadyMessages(time.Now().UnixMilli(), func(sessionID uint64, readyMsg messages.Message) {
|
||||
producer.Produce(TOPIC_TRIGGER, sessionID, messages.Encode(readyMsg))
|
||||
})
|
||||
// TODO: why exactly do we need Flush here and not in any other place?
|
||||
|
|
@ -69,4 +69,3 @@ func main() {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -2,55 +2,55 @@ package main
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"errors"
|
||||
"time"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/token"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/token"
|
||||
)
|
||||
|
||||
const FILES_SIZE_LIMIT int64 = 1e7 // 10Mb
|
||||
const FILES_SIZE_LIMIT int64 = 1e7 // 10Mb
|
||||
|
||||
func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
type request struct {
|
||||
Token string `json:"token"`
|
||||
ProjectKey *string `json:"projectKey"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
RevID string `json:"revID"`
|
||||
UserUUID *string `json:"userUUID"`
|
||||
Token string `json:"token"`
|
||||
ProjectKey *string `json:"projectKey"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
RevID string `json:"revID"`
|
||||
UserUUID *string `json:"userUUID"`
|
||||
//UserOS string `json"userOS"` //hardcoded 'MacOS'
|
||||
UserOSVersion string `json:"userOSVersion"`
|
||||
UserDevice string `json:"userDevice"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
UserOSVersion string `json:"userOSVersion"`
|
||||
UserDevice string `json:"userDevice"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
// UserDeviceType uint 0:phone 1:pad 2:tv 3:carPlay 5:mac
|
||||
// “performances”:{
|
||||
// “activeProcessorCount”:8,
|
||||
// “isLowPowerModeEnabled”:0,
|
||||
// “orientation”:0,
|
||||
// “systemUptime”:585430,
|
||||
// “batteryState”:0,
|
||||
// “thermalState”:0,
|
||||
// “batteryLevel”:0,
|
||||
// “processorCount”:8,
|
||||
// “physicalMemory”:17179869184
|
||||
// },
|
||||
// “activeProcessorCount”:8,
|
||||
// “isLowPowerModeEnabled”:0,
|
||||
// “orientation”:0,
|
||||
// “systemUptime”:585430,
|
||||
// “batteryState”:0,
|
||||
// “thermalState”:0,
|
||||
// “batteryLevel”:0,
|
||||
// “processorCount”:8,
|
||||
// “physicalMemory”:17179869184
|
||||
// },
|
||||
}
|
||||
type response struct {
|
||||
Token string `json:"token"`
|
||||
ImagesHashList []string `json:"imagesHashList"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
SessionID string `json:"sessionID"`
|
||||
Token string `json:"token"`
|
||||
ImagesHashList []string `json:"imagesHashList"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
SessionID string `json:"sessionID"`
|
||||
}
|
||||
startTime := time.Now()
|
||||
req := &request{}
|
||||
body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT)
|
||||
//defer body.Close()
|
||||
defer body.Close()
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
responseWithError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
|
|
@ -85,29 +85,29 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
|||
responseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
sessionID, err := flaker.Compose(uint64(startTime.UnixNano() / 1e6))
|
||||
sessionID, err := flaker.Compose(uint64(startTime.UnixMilli()))
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixNano() / 1e6}
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}
|
||||
|
||||
country := geoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
|
||||
// The difference with web is mostly here:
|
||||
producer.Produce(TOPIC_RAW_IOS, tokenData.ID, Encode(&IOSSessionStart{
|
||||
Timestamp: req.Timestamp,
|
||||
ProjectID: uint64(p.ProjectID),
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
RevID: req.RevID,
|
||||
UserUUID: userUUID,
|
||||
UserOS: "IOS",
|
||||
UserOSVersion: req.UserOSVersion,
|
||||
UserDevice: MapIOSDevice(req.UserDevice),
|
||||
UserDeviceType: GetIOSDeviceType(req.UserDevice),
|
||||
UserCountry: country,
|
||||
Timestamp: req.Timestamp,
|
||||
ProjectID: uint64(p.ProjectID),
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
RevID: req.RevID,
|
||||
UserUUID: userUUID,
|
||||
UserOS: "IOS",
|
||||
UserOSVersion: req.UserOSVersion,
|
||||
UserDevice: MapIOSDevice(req.UserDevice),
|
||||
UserDeviceType: GetIOSDeviceType(req.UserDevice),
|
||||
UserCountry: country,
|
||||
}))
|
||||
}
|
||||
|
||||
|
|
@ -119,14 +119,13 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
responseWithJSON(w, &response{
|
||||
// ImagesHashList: imagesHashList,
|
||||
Token: tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
Token: tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
BeaconSizeLimit: BEACON_SIZE_LIMIT,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
func pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
sessionData, err := tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil {
|
||||
|
|
@ -136,8 +135,6 @@ func pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
|||
pushMessages(w, r, sessionData.ID, TOPIC_RAW_IOS)
|
||||
}
|
||||
|
||||
|
||||
|
||||
func pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
sessionData, err := tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil && err != token.EXPIRED {
|
||||
|
|
@ -145,10 +142,9 @@ func pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
// Check timestamps here?
|
||||
pushMessages(w, r, sessionData.ID,TOPIC_RAW_IOS)
|
||||
pushMessages(w, r, sessionData.ID, TOPIC_RAW_IOS)
|
||||
}
|
||||
|
||||
|
||||
func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
log.Printf("recieved imagerequest")
|
||||
|
||||
|
|
@ -159,16 +155,16 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
r.Body = http.MaxBytesReader(w, r.Body, FILES_SIZE_LIMIT)
|
||||
// defer r.Body.Close()
|
||||
defer r.Body.Close()
|
||||
err = r.ParseMultipartForm(1e6) // ~1Mb
|
||||
if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
|
||||
responseWithError(w, http.StatusUnsupportedMediaType, err)
|
||||
// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
|
||||
// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
|
||||
} else if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
}
|
||||
|
||||
if (r.MultipartForm == nil) {
|
||||
if r.MultipartForm == nil {
|
||||
responseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"))
|
||||
}
|
||||
|
||||
|
|
@ -177,7 +173,7 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"
|
||||
prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"
|
||||
|
||||
for _, fileHeaderList := range r.MultipartForm.File {
|
||||
for _, fileHeader := range fileHeaderList {
|
||||
|
|
@ -187,7 +183,7 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
key := prefix + fileHeader.Filename
|
||||
log.Printf("Uploading image... %v", key)
|
||||
go func() { //TODO: mime type from header
|
||||
go func() { //TODO: mime type from header
|
||||
if err := s3.Upload(file, key, "image/jpeg", false); err != nil {
|
||||
log.Printf("Upload ios screen error. %v", err)
|
||||
}
|
||||
|
|
@ -11,8 +11,8 @@ import (
|
|||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/token"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/token"
|
||||
)
|
||||
|
||||
func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
||||
|
|
@ -30,18 +30,18 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
UserID string `json:"userID"`
|
||||
}
|
||||
type response struct {
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Delay int64 `json:"delay"`
|
||||
Token string `json:"token"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
SessionID string `json:"sessionID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Delay int64 `json:"delay"`
|
||||
Token string `json:"token"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
SessionID string `json:"sessionID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
req := &request{}
|
||||
body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT) // what if Body == nil?? // use r.ContentLength to return specific error?
|
||||
//defer body.Close()
|
||||
defer body.Close()
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
responseWithError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
|
|
@ -76,14 +76,14 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
responseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
sessionID, err := flaker.Compose(uint64(startTime.UnixNano() / 1e6))
|
||||
sessionID, err := flaker.Compose(uint64(startTime.UnixMilli()))
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixNano() / 1e6}
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}
|
||||
|
||||
country := geoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
producer.Produce(TOPIC_RAW_WEB, tokenData.ID, Encode(&SessionStart{
|
||||
|
|
@ -102,17 +102,17 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
UserCountry: country,
|
||||
UserDeviceMemorySize: req.DeviceMemory,
|
||||
UserDeviceHeapSize: req.JsHeapSizeLimit,
|
||||
UserID: req.UserID,
|
||||
UserID: req.UserID,
|
||||
}))
|
||||
}
|
||||
|
||||
//delayDuration := time.Now().Sub(startTime)
|
||||
responseWithJSON(w, &response{
|
||||
//Timestamp: startTime.UnixNano() / 1e6,
|
||||
//Delay: delayDuration.Nanoseconds() / 1e6,
|
||||
Token: tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
//Timestamp: startTime.UnixMilli(),
|
||||
//Delay: delayDuration.Milliseconds(),
|
||||
Token: tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
BeaconSizeLimit: BEACON_SIZE_LIMIT,
|
||||
})
|
||||
}
|
||||
|
|
@ -124,7 +124,7 @@ func pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT)
|
||||
//defer body.Close()
|
||||
defer body.Close()
|
||||
buf, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
|
|
@ -248,4 +248,4 @@ func notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
log.Printf("Unable to insert Unstarted Session: %v\n", err)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
}
|
||||
|
|
@@ -9,11 +9,11 @@ import (
    gzip "github.com/klauspost/pgzip"
)

const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb
const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb

func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
    body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT)
    //defer body.Close()
    defer body.Close()
    var reader io.ReadCloser
    var err error
    switch r.Header.Get("Content-Encoding") {
138
backend/services/http/ios-device.go
Normal file
@ -0,0 +1,138 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func MapIOSDevice(identifier string) string {
|
||||
switch identifier {
|
||||
case "iPod5,1":
|
||||
return "iPod touch (5th generation)"
|
||||
case "iPod7,1":
|
||||
return "iPod touch (6th generation)"
|
||||
case "iPod9,1":
|
||||
return "iPod touch (7th generation)"
|
||||
case "iPhone3,1", "iPhone3,2", "iPhone3,3":
|
||||
return "iPhone 4"
|
||||
case "iPhone4,1":
|
||||
return "iPhone 4s"
|
||||
case "iPhone5,1", "iPhone5,2":
|
||||
return "iPhone 5"
|
||||
case "iPhone5,3", "iPhone5,4":
|
||||
return "iPhone 5c"
|
||||
case "iPhone6,1", "iPhone6,2":
|
||||
return "iPhone 5s"
|
||||
case "iPhone7,2":
|
||||
return "iPhone 6"
|
||||
case "iPhone7,1":
|
||||
return "iPhone 6 Plus"
|
||||
case "iPhone8,1":
|
||||
return "iPhone 6s"
|
||||
case "iPhone8,2":
|
||||
return "iPhone 6s Plus"
|
||||
case "iPhone8,4":
|
||||
return "iPhone SE"
|
||||
case "iPhone9,1", "iPhone9,3":
|
||||
return "iPhone 7"
|
||||
case "iPhone9,2", "iPhone9,4":
|
||||
return "iPhone 7 Plus"
|
||||
case "iPhone10,1", "iPhone10,4":
|
||||
return "iPhone 8"
|
||||
case "iPhone10,2", "iPhone10,5":
|
||||
return "iPhone 8 Plus"
|
||||
case "iPhone10,3", "iPhone10,6":
|
||||
return "iPhone X"
|
||||
case "iPhone11,2":
|
||||
return "iPhone XS"
|
||||
case "iPhone11,4", "iPhone11,6":
|
||||
return "iPhone XS Max"
|
||||
case "iPhone11,8":
|
||||
return "iPhone XR"
|
||||
case "iPhone12,1":
|
||||
return "iPhone 11"
|
||||
case "iPhone12,3":
|
||||
return "iPhone 11 Pro"
|
||||
case "iPhone12,5":
|
||||
return "iPhone 11 Pro Max"
|
||||
case "iPhone12,8":
|
||||
return "iPhone SE (2nd generation)"
|
||||
case "iPhone13,1":
|
||||
return "iPhone 12 mini"
|
||||
case "iPhone13,2":
|
||||
return "iPhone 12"
|
||||
case "iPhone13,3":
|
||||
return "iPhone 12 Pro"
|
||||
case "iPhone13,4":
|
||||
return "iPhone 12 Pro Max"
|
||||
case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":
|
||||
return "iPad 2"
|
||||
case "iPad3,1", "iPad3,2", "iPad3,3":
|
||||
return "iPad (3rd generation)"
|
||||
case "iPad3,4", "iPad3,5", "iPad3,6":
|
||||
return "iPad (4th generation)"
|
||||
case "iPad6,11", "iPad6,12":
|
||||
return "iPad (5th generation)"
|
||||
case "iPad7,5", "iPad7,6":
|
||||
return "iPad (6th generation)"
|
||||
case "iPad7,11", "iPad7,12":
|
||||
return "iPad (7th generation)"
|
||||
case "iPad11,6", "iPad11,7":
|
||||
return "iPad (8th generation)"
|
||||
case "iPad4,1", "iPad4,2", "iPad4,3":
|
||||
return "iPad Air"
|
||||
case "iPad5,3", "iPad5,4":
|
||||
return "iPad Air 2"
|
||||
case "iPad11,3", "iPad11,4":
|
||||
return "iPad Air (3rd generation)"
|
||||
case "iPad13,1", "iPad13,2":
|
||||
return "iPad Air (4th generation)"
|
||||
case "iPad2,5", "iPad2,6", "iPad2,7":
|
||||
return "iPad mini"
|
||||
case "iPad4,4", "iPad4,5", "iPad4,6":
|
||||
return "iPad mini 2"
|
||||
case "iPad4,7", "iPad4,8", "iPad4,9":
|
||||
return "iPad mini 3"
|
||||
case "iPad5,1", "iPad5,2":
|
||||
return "iPad mini 4"
|
||||
case "iPad11,1", "iPad11,2":
|
||||
return "iPad mini (5th generation)"
|
||||
case "iPad6,3", "iPad6,4":
|
||||
return "iPad Pro (9.7-inch)"
|
||||
case "iPad7,3", "iPad7,4":
|
||||
return "iPad Pro (10.5-inch)"
|
||||
case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4":
|
||||
return "iPad Pro (11-inch) (1st generation)"
|
||||
case "iPad8,9", "iPad8,10":
|
||||
return "iPad Pro (11-inch) (2nd generation)"
|
||||
case "iPad6,7", "iPad6,8":
|
||||
return "iPad Pro (12.9-inch) (1st generation)"
|
||||
case "iPad7,1", "iPad7,2":
|
||||
return "iPad Pro (12.9-inch) (2nd generation)"
|
||||
case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8":
|
||||
return "iPad Pro (12.9-inch) (3rd generation)"
|
||||
case "iPad8,11", "iPad8,12":
|
||||
return "iPad Pro (12.9-inch) (4th generation)"
|
||||
case "AppleTV5,3":
|
||||
return "Apple TV"
|
||||
case "AppleTV6,2":
|
||||
return "Apple TV 4K"
|
||||
case "AudioAccessory1,1":
|
||||
return "HomePod"
|
||||
case "AudioAccessory5,1":
|
||||
return "HomePod mini"
|
||||
case "i386", "x86_64":
|
||||
return "Simulator"
|
||||
default:
|
||||
return identifier
|
||||
}
|
||||
}
|
||||
|
||||
func GetIOSDeviceType(identifier string) string {
|
||||
if strings.Contains(identifier, "iPhone") {
|
||||
return "mobile" //"phone"
|
||||
}
|
||||
if strings.Contains(identifier, "iPad") {
|
||||
return "tablet"
|
||||
}
|
||||
return "other"
|
||||
}
|
||||
|
|
@ -1,79 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func MapIOSDevice(identifier string) string {
|
||||
switch identifier {
|
||||
case "iPod5,1": return "iPod touch (5th generation)"
|
||||
case "iPod7,1": return "iPod touch (6th generation)"
|
||||
case "iPod9,1": return "iPod touch (7th generation)"
|
||||
case "iPhone3,1", "iPhone3,2", "iPhone3,3": return "iPhone 4"
|
||||
case "iPhone4,1": return "iPhone 4s"
|
||||
case "iPhone5,1", "iPhone5,2": return "iPhone 5"
|
||||
case "iPhone5,3", "iPhone5,4": return "iPhone 5c"
|
||||
case "iPhone6,1", "iPhone6,2": return "iPhone 5s"
|
||||
case "iPhone7,2": return "iPhone 6"
|
||||
case "iPhone7,1": return "iPhone 6 Plus"
|
||||
case "iPhone8,1": return "iPhone 6s"
|
||||
case "iPhone8,2": return "iPhone 6s Plus"
|
||||
case "iPhone8,4": return "iPhone SE"
|
||||
case "iPhone9,1", "iPhone9,3": return "iPhone 7"
|
||||
case "iPhone9,2", "iPhone9,4": return "iPhone 7 Plus"
|
||||
case "iPhone10,1", "iPhone10,4": return "iPhone 8"
|
||||
case "iPhone10,2", "iPhone10,5": return "iPhone 8 Plus"
|
||||
case "iPhone10,3", "iPhone10,6": return "iPhone X"
|
||||
case "iPhone11,2": return "iPhone XS"
|
||||
case "iPhone11,4", "iPhone11,6": return "iPhone XS Max"
|
||||
case "iPhone11,8": return "iPhone XR"
|
||||
case "iPhone12,1": return "iPhone 11"
|
||||
case "iPhone12,3": return "iPhone 11 Pro"
|
||||
case "iPhone12,5": return "iPhone 11 Pro Max"
|
||||
case "iPhone12,8": return "iPhone SE (2nd generation)"
|
||||
case "iPhone13,1": return "iPhone 12 mini"
|
||||
case "iPhone13,2": return "iPhone 12"
|
||||
case "iPhone13,3": return "iPhone 12 Pro"
|
||||
case "iPhone13,4": return "iPhone 12 Pro Max"
|
||||
case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":return "iPad 2"
|
||||
case "iPad3,1", "iPad3,2", "iPad3,3": return "iPad (3rd generation)"
|
||||
case "iPad3,4", "iPad3,5", "iPad3,6": return "iPad (4th generation)"
|
||||
case "iPad6,11", "iPad6,12": return "iPad (5th generation)"
|
||||
case "iPad7,5", "iPad7,6": return "iPad (6th generation)"
|
||||
case "iPad7,11", "iPad7,12": return "iPad (7th generation)"
|
||||
case "iPad11,6", "iPad11,7": return "iPad (8th generation)"
|
||||
case "iPad4,1", "iPad4,2", "iPad4,3": return "iPad Air"
|
||||
case "iPad5,3", "iPad5,4": return "iPad Air 2"
|
||||
case "iPad11,3", "iPad11,4": return "iPad Air (3rd generation)"
|
||||
case "iPad13,1", "iPad13,2": return "iPad Air (4th generation)"
|
||||
case "iPad2,5", "iPad2,6", "iPad2,7": return "iPad mini"
|
||||
case "iPad4,4", "iPad4,5", "iPad4,6": return "iPad mini 2"
|
||||
case "iPad4,7", "iPad4,8", "iPad4,9": return "iPad mini 3"
|
||||
case "iPad5,1", "iPad5,2": return "iPad mini 4"
|
||||
case "iPad11,1", "iPad11,2": return "iPad mini (5th generation)"
|
||||
case "iPad6,3", "iPad6,4": return "iPad Pro (9.7-inch)"
|
||||
case "iPad7,3", "iPad7,4": return "iPad Pro (10.5-inch)"
|
||||
case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4":return "iPad Pro (11-inch) (1st generation)"
|
||||
case "iPad8,9", "iPad8,10": return "iPad Pro (11-inch) (2nd generation)"
|
||||
case "iPad6,7", "iPad6,8": return "iPad Pro (12.9-inch) (1st generation)"
|
||||
case "iPad7,1", "iPad7,2": return "iPad Pro (12.9-inch) (2nd generation)"
|
||||
case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8":return "iPad Pro (12.9-inch) (3rd generation)"
|
||||
case "iPad8,11", "iPad8,12": return "iPad Pro (12.9-inch) (4th generation)"
|
||||
case "AppleTV5,3": return "Apple TV"
|
||||
case "AppleTV6,2": return "Apple TV 4K"
|
||||
case "AudioAccessory1,1": return "HomePod"
|
||||
case "AudioAccessory5,1": return "HomePod mini"
|
||||
case "i386", "x86_64": return "Simulator"
|
||||
default: return identifier
|
||||
}
|
||||
}
|
||||
|
||||
func GetIOSDeviceType(identifier string) string {
|
||||
if strings.Contains(identifier, "iPhone") {
|
||||
return "mobile" //"phone"
|
||||
}
|
||||
if strings.Contains(identifier, "iPad") {
|
||||
return "tablet"
|
||||
}
|
||||
return "other"
|
||||
}
|
||||
|
|
@ -10,19 +10,19 @@ import (
|
|||
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/flakeid"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"openreplay/backend/pkg/token"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"openreplay/backend/services/http/geoip"
|
||||
"openreplay/backend/services/http/uaparser"
|
||||
|
||||
"openreplay/backend/pkg/pprof"
|
||||
)
|
||||
|
||||
var rewriter *assets.Rewriter
|
||||
|
|
@ -38,12 +38,14 @@ var TOPIC_RAW_WEB string
|
|||
var TOPIC_RAW_IOS string
|
||||
var TOPIC_CACHE string
|
||||
var TOPIC_TRIGGER string
|
||||
|
||||
//var TOPIC_ANALYTICS string
|
||||
var CACHE_ASSESTS bool
|
||||
var BEACON_SIZE_LIMIT int64
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
pprof.StartProfilingServer()
|
||||
|
||||
producer = queue.NewProducer()
|
||||
defer producer.Close(15000)
|
||||
|
|
@ -53,7 +55,7 @@ func main() {
|
|||
TOPIC_TRIGGER = env.String("TOPIC_TRIGGER")
|
||||
//TOPIC_ANALYTICS = env.String("TOPIC_ANALYTICS")
|
||||
rewriter = assets.NewRewriter(env.String("ASSETS_ORIGIN"))
|
||||
pgconn = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000 * 60 * 20)
|
||||
pgconn = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000*60*20)
|
||||
defer pgconn.Close()
|
||||
s3 = storage.NewS3(env.String("AWS_REGION"), env.String("S3_BUCKET_IOS_IMAGES"))
|
||||
tokenizer = token.NewTokenizer(env.String("TOKEN_SECRET"))
|
||||
|
|
@ -70,7 +72,7 @@ func main() {
|
|||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// TODO: agree with specification
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "POST")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization")
|
||||
if r.Method == http.MethodOptions {
|
||||
|
|
@ -79,13 +81,12 @@ func main() {
|
|||
return
|
||||
}
|
||||
|
||||
log.Printf("Request: %v - %v ", r.Method, r.URL.Path)
|
||||
|
||||
log.Printf("Request: %v - %v ", r.Method, r.URL.Path)
|
||||
|
||||
switch r.URL.Path {
|
||||
case "/":
|
||||
w.WriteHeader(http.StatusOK)
|
||||
case "/v1/web/not-started":
|
||||
case "/v1/web/not-started":
|
||||
switch r.Method {
|
||||
case http.MethodPost:
|
||||
notStartedHandlerWeb(w, r)
|
||||
|
|
|
|||
|
|
@ -1,12 +0,0 @@
package main

func decodeProjectID(projectID uint64) uint64 {
	if projectID < 0x10000000000000 || projectID >= 0x20000000000000 {
		return 0
	}
	projectID = (projectID - 0x10000000000000) * 4212451012670231 & 0xfffffffffffff
	if projectID > 0xffffffff {
		return 0
	}
	return projectID
}
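The deleted decodeProjectID shows the trick used for obfuscated project IDs: require the value to sit in the 2^52..2^53 window, multiply by a large odd constant modulo 2^52, and reject anything that does not land in the 32-bit range. A small self-contained check of those range guards; the sample inputs are made up for illustration, the constant and bounds are copied from the function above:

package main

import "fmt"

// Copied from the deleted helper above.
func decodeProjectID(projectID uint64) uint64 {
	if projectID < 0x10000000000000 || projectID >= 0x20000000000000 {
		return 0 // outside the encoded window
	}
	projectID = (projectID - 0x10000000000000) * 4212451012670231 & 0xfffffffffffff
	if projectID > 0xffffffff {
		return 0 // decoded value must fit in 32 bits
	}
	return projectID
}

func main() {
	// Values below or above the 2^52..2^53 window are rejected outright.
	fmt.Println(decodeProjectID(42))               // 0
	fmt.Println(decodeProjectID(0x20000000000000)) // 0
	// An in-window value decodes to some 32-bit ID, or 0 if it overflows that range.
	fmt.Println(decodeProjectID(0x10000000000000 + 12345))
}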
@ -1,15 +1,14 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"time"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
|
@ -18,15 +17,14 @@ import (
|
|||
*/
|
||||
|
||||
type bugsnag struct {
|
||||
BugsnagProjectId string // `json:"bugsnag_project_id"`
|
||||
BugsnagProjectId string // `json:"bugsnag_project_id"`
|
||||
AuthorizationToken string // `json:"auth_token"`
|
||||
}
|
||||
|
||||
|
||||
type bugsnagEvent struct {
|
||||
MetaData struct {
|
||||
SpecialInfo struct {
|
||||
AsayerSessionId uint64 `json:"asayerSessionId,string"`
|
||||
AsayerSessionId uint64 `json:"asayerSessionId,string"`
|
||||
OpenReplaySessionToken string `json:"openReplaySessionToken"`
|
||||
} `json:"special_info"`
|
||||
} `json:"metaData"`
|
||||
|
|
@ -38,7 +36,7 @@ type bugsnagEvent struct {

func (b *bugsnag) Request(c *client) error {
sinceTs := c.getLastMessageTimestamp() + 1000 // From next second
sinceFormatted := time.Unix(0, int64(sinceTs*1e6)).Format(time.RFC3339)
sinceFormatted := time.UnixMilli(int64(sinceTs)).Format(time.RFC3339)
requestURL := fmt.Sprintf("https://api.bugsnag.com/projects/%v/events", b.BugsnagProjectId)
req, err := http.NewRequest("GET", requestURL, nil)
if err != nil {
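Several of these hunks replace hand-rolled millisecond conversions such as time.Unix(0, int64(ms*1e6)) with time.UnixMilli, which has been available since Go 1.17. A quick sketch showing the two forms produce the same RFC3339 string; the sample timestamp is arbitrary:

package main

import (
	"fmt"
	"time"
)

func main() {
	var ms uint64 = 1647000000000 // arbitrary Unix-millisecond timestamp

	oldStr := time.Unix(0, int64(ms)*1e6).Format(time.RFC3339) // nanoseconds via multiplication
	newStr := time.UnixMilli(int64(ms)).Format(time.RFC3339)   // Go 1.17+ helper

	fmt.Println(oldStr == newStr, oldStr) // true, identical instants
}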
@ -47,10 +45,10 @@ func (b *bugsnag) Request(c *client) error {
|
|||
q := req.URL.Query()
|
||||
// q.Add("per_page", "100") // Up to a maximum of 30. Default: 30
|
||||
// q.Add("sort", "timestamp") // Default: timestamp (timestamp == ReceivedAt ??)
|
||||
q.Add("direction", "asc") // Default: desc
|
||||
q.Add("direction", "asc") // Default: desc
|
||||
q.Add("full_reports", "true") // Default: false
|
||||
q.Add("filters[event.since][][type]", "eq")
|
||||
q.Add("filters[event.since][][value]", sinceFormatted) // seems like inclusively
|
||||
q.Add("filters[event.since][][type]", "eq")
|
||||
q.Add("filters[event.since][][value]", sinceFormatted) // seems like inclusively
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
authToken := "token " + b.AuthorizationToken
|
||||
|
|
@ -85,7 +83,7 @@ func (b *bugsnag) Request(c *client) error {
|
|||
}
|
||||
sessionID := e.MetaData.SpecialInfo.AsayerSessionId
|
||||
token := e.MetaData.SpecialInfo.OpenReplaySessionToken
|
||||
if sessionID == 0 && token == "" {
|
||||
if sessionID == 0 && token == "" {
|
||||
// c.errChan <- "No AsayerSessionId found. | Message: %v", e
|
||||
continue
|
||||
}
|
||||
|
|
@ -94,16 +92,16 @@ func (b *bugsnag) Request(c *client) error {
|
|||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(parsedTime))
|
||||
timestamp := uint64(parsedTime.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
SessionID: sessionID,
|
||||
Token: token,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "bugsnag",
|
||||
Source: "bugsnag",
|
||||
Timestamp: timestamp,
|
||||
Name: e.Exceptions[0].Message,
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.Exceptions[0].Message,
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -5,10 +5,10 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/utime"
|
||||
)
|
||||
|
||||
const MAX_ATTEMPTS_IN_A_ROW = 4
|
||||
|
|
@ -20,10 +20,10 @@ type requester interface {
|
|||
}
|
||||
|
||||
type requestData struct {
|
||||
LastMessageTimestamp uint64 // `json:"lastMessageTimestamp, string"`
|
||||
LastMessageId string
|
||||
LastMessageTimestamp uint64 // `json:"lastMessageTimestamp, string"`
|
||||
LastMessageId string
|
||||
UnsuccessfullAttemptsCount int
|
||||
LastAttemptTimestamp int64
|
||||
LastAttemptTimestamp int64
|
||||
}
|
||||
|
||||
type client struct {
|
||||
|
|
@ -31,19 +31,19 @@ type client struct {
|
|||
requester
|
||||
integration *postgres.Integration
|
||||
// TODO: timeout ?
|
||||
mux sync.Mutex
|
||||
mux sync.Mutex
|
||||
updateChan chan<- postgres.Integration
|
||||
evChan chan<- *SessionErrorEvent
|
||||
errChan chan<- error
|
||||
evChan chan<- *SessionErrorEvent
|
||||
errChan chan<- error
|
||||
}
|
||||
|
||||
type SessionErrorEvent struct {
|
||||
SessionID uint64
|
||||
Token string
|
||||
Token string
|
||||
*messages.RawErrorEvent
|
||||
}
|
||||
|
||||
type ClientMap map[ string ]*client
|
||||
type ClientMap map[string]*client
|
||||
|
||||
func NewClient(i *postgres.Integration, updateChan chan<- postgres.Integration, evChan chan<- *SessionErrorEvent, errChan chan<- error) (*client, error) {
|
||||
c := new(client)
|
||||
|
|
@ -60,15 +60,14 @@ func NewClient(i *postgres.Integration, updateChan chan<- postgres.Integration,
// TODO: RequestData manager
if c.requestData.LastMessageTimestamp == 0 {
// ?
c.requestData.LastMessageTimestamp = uint64(utime.CurrentTimestamp() - 24*60*60*1000)
c.requestData.LastMessageTimestamp = uint64(time.Now().Add(-time.Hour * 24).UnixMilli())
}

return c, nil
}

// from outside
func (c* client) Update(i *postgres.Integration) error {
func (c *client) Update(i *postgres.Integration) error {
c.mux.Lock()
defer c.mux.Unlock()
var r requester
@ -111,8 +110,8 @@ func (c *client) getLastMessageTimestamp() uint64 {
|
|||
}
|
||||
func (c *client) setLastMessageId(timestamp uint64, id string) {
|
||||
//if timestamp >= c.requestData.LastMessageTimestamp {
|
||||
c.requestData.LastMessageId = id
|
||||
c.requestData.LastMessageTimestamp = timestamp
|
||||
c.requestData.LastMessageId = id
|
||||
c.requestData.LastMessageTimestamp = timestamp
|
||||
//}
|
||||
}
|
||||
func (c *client) getLastMessageId() string {
|
||||
|
|
@ -128,18 +127,18 @@ func (c *client) Request() {
c.mux.Lock()
defer c.mux.Unlock()
if c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS ||
(c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS_IN_A_ROW &&
utime.CurrentTimestamp() - c.requestData.LastAttemptTimestamp < ATTEMPTS_INTERVAL) {
(c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS_IN_A_ROW &&
time.Now().UnixMilli()-c.requestData.LastAttemptTimestamp < ATTEMPTS_INTERVAL) {
return
}

c.requestData.LastAttemptTimestamp = utime.CurrentTimestamp()
c.requestData.LastAttemptTimestamp = time.Now().UnixMilli()
err := c.requester.Request(c)
if err != nil {
log.Println("ERRROR L139")
log.Println(err)
c.handleError(err)
c.requestData.UnsuccessfullAttemptsCount++;
c.requestData.UnsuccessfullAttemptsCount++
} else {
c.requestData.UnsuccessfullAttemptsCount = 0
}
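The gate at the top of Request implements a simple give-up / cool-down policy: stop for good after MAX_ATTEMPTS failures, and back off for ATTEMPTS_INTERVAL milliseconds once MAX_ATTEMPTS_IN_A_ROW consecutive failures are reached. A standalone sketch of the same rule, with made-up constant values since the real ones are defined elsewhere in this file:

package main

import (
	"fmt"
	"time"
)

// Illustrative values only; the real MAX_ATTEMPTS, MAX_ATTEMPTS_IN_A_ROW and
// ATTEMPTS_INTERVAL constants are defined elsewhere in the integration client.
const (
	maxAttempts        = 10
	maxAttemptsInARow  = 4
	attemptsIntervalMs = int64(5 * 60 * 1000)
)

// shouldSkip reports whether a poll attempt should be skipped, given the
// number of consecutive failures and the time of the last attempt (ms).
func shouldSkip(failures int, lastAttemptMs int64) bool {
	return failures >= maxAttempts ||
		(failures >= maxAttemptsInARow &&
			time.Now().UnixMilli()-lastAttemptMs < attemptsIntervalMs)
}

func main() {
	now := time.Now().UnixMilli()
	fmt.Println(shouldSkip(2, now))            // false: under both limits
	fmt.Println(shouldSkip(5, now))            // true: too many in a row, too soon to retry
	fmt.Println(shouldSkip(5, now-10*60*1000)) // false: cool-down has passed
	fmt.Println(shouldSkip(12, 0))             // true: permanently given up
}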
|
@ -152,5 +151,3 @@ func (c *client) Request() {
|
|||
c.integration.RequestData = rd
|
||||
c.updateChan <- *c.integration
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,38 +1,37 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"encoding/json"
|
||||
"bytes"
|
||||
"time"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
/*
|
||||
We collect Logs. Datadog also has Events
|
||||
|
||||
*/
|
||||
|
||||
type datadog struct {
|
||||
ApplicationKey string //`json:"application_key"`
|
||||
ApiKey string //`json:"api_key"`
|
||||
ApplicationKey string //`json:"application_key"`
|
||||
ApiKey string //`json:"api_key"`
|
||||
}
|
||||
|
||||
type datadogResponce struct {
|
||||
Logs []json.RawMessage
|
||||
Logs []json.RawMessage
|
||||
NextLogId *string
|
||||
Status string
|
||||
Status string
|
||||
}
|
||||
|
||||
type datadogLog struct {
|
||||
Content struct {
|
||||
Timestamp string
|
||||
Message string
|
||||
Timestamp string
|
||||
Message string
|
||||
Attributes struct {
|
||||
Error struct { // Not sure about this
|
||||
Message string
|
||||
|
|
@ -48,10 +47,10 @@ func (d *datadog) makeRequest(nextLogId *string, fromTs uint64, toTs uint64) (*h
|
|||
d.ApplicationKey,
|
||||
)
|
||||
startAt := "null"
|
||||
if nextLogId != nil && *nextLogId != "" {
|
||||
if nextLogId != nil && *nextLogId != "" {
|
||||
startAt = *nextLogId
|
||||
}
|
||||
// Query: status:error/info/warning?
|
||||
// Query: status:error/info/warning?
|
||||
// openReplaySessionToken instead of asayer_session_id
|
||||
jsonBody := fmt.Sprintf(`{
|
||||
"limit": 1000,
|
||||
|
|
@ -72,8 +71,8 @@ func (d *datadog) makeRequest(nextLogId *string, fromTs uint64, toTs uint64) (*h
|
|||
}
|
||||
|
||||
func (d *datadog) Request(c *client) error {
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond
|
||||
toTs := uint64(utime.CurrentTimestamp())
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond
|
||||
toTs := uint64(time.Now().UnixMilli())
|
||||
var nextLogId *string
|
||||
for {
|
||||
req, err := d.makeRequest(nextLogId, fromTs, toTs)
|
||||
|
|
@ -111,16 +110,16 @@ func (d *datadog) Request(c *client) error {
|
|||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(parsedTime))
|
||||
timestamp := uint64(parsedTime.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
//SessionID: sessionID,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "datadog",
|
||||
Source: "datadog",
|
||||
Timestamp: timestamp,
|
||||
Name: ddLog.Content.Attributes.Error.Message,
|
||||
Payload: string(jsonLog),
|
||||
Name: ddLog.Content.Attributes.Error.Message,
|
||||
Payload: string(jsonLog),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -129,4 +128,4 @@ func (d *datadog) Request(c *client) error {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ import (
|
|||
"time"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/utime"
|
||||
)
|
||||
|
||||
type elasticsearch struct {
|
||||
|
|
@ -164,7 +163,7 @@ func (es *elasticsearch) Request(c *client) error {
|
|||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(esLog.Time))
|
||||
timestamp := uint64(esLog.Time.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
|
||||
var sessionID uint64
|
||||
|
|
|
|||
|
|
@ -2,25 +2,24 @@ package integration
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
We use insights-api for query. They also have Logs and Events
|
||||
We use insights-api for query. They also have Logs and Events
|
||||
*/
|
||||
|
||||
|
||||
// TODO: Eu/us
|
||||
type newrelic struct {
|
||||
ApplicationId string //`json:"application_id"`
|
||||
XQueryKey string //`json:"x_query_key"`
|
||||
ApplicationId string //`json:"application_id"`
|
||||
XQueryKey string //`json:"x_query_key"`
|
||||
}
|
||||
|
||||
// TODO: Recheck
|
||||
|
|
@ -34,14 +33,14 @@ type newrelicResponce struct {
|
|||
type newrelicEvent struct {
|
||||
//AsayerSessionID uint64 `json:"asayer_session_id,string"` // string/int decoder?
|
||||
OpenReplaySessionToken string `json:"openReplaySessionToken"`
|
||||
ErrorClass string `json:"error.class"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
ErrorClass string `json:"error.class"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
func (nr *newrelic) Request(c *client) error {
|
||||
sinceTs := c.getLastMessageTimestamp() + 1000 // From next second
|
||||
// In docs - format "yyyy-mm-dd HH:MM:ss", but time.RFC3339 works fine too
|
||||
sinceFormatted := time.Unix(0, int64(sinceTs*1e6)).Format(time.RFC3339)
|
||||
sinceFormatted := time.UnixMilli(int64(sinceTs)).Format(time.RFC3339)
|
||||
// US/EU endpoint ??
|
||||
requestURL := fmt.Sprintf("https://insights-api.eu.newrelic.com/v1/accounts/%v/query", nr.ApplicationId)
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
|
|
@ -64,11 +63,10 @@ func (nr *newrelic) Request(c *client) error {
|
|||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
|
||||
// 401 (unauthorised) if wrong XQueryKey/deploymentServer is wrong or 403 (Forbidden) if ApplicationId is wrong
|
||||
// 400 if Query has problems
|
||||
if resp.StatusCode >= 400 {
|
||||
io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket
|
||||
io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket
|
||||
return fmt.Errorf("Newrelic: server respond with the code %v| Request: ", resp.StatusCode, *req)
|
||||
}
|
||||
// Pagination depending on returning metadata ?
|
||||
|
|
@ -92,13 +90,13 @@ func (nr *newrelic) Request(c *client) error {
|
|||
c.evChan <- &SessionErrorEvent{
|
||||
Token: e.OpenReplaySessionToken,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "newrelic",
|
||||
Source: "newrelic",
|
||||
Timestamp: e.Timestamp,
|
||||
Name: e.ErrorClass,
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.ErrorClass,
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,44 +1,41 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"fmt"
|
||||
"time"
|
||||
"strconv"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
/*
|
||||
/*
|
||||
They also have different stuff
|
||||
Documentation says:
|
||||
Documentation says:
|
||||
"Note: This endpoint is experimental and may be removed without notice."
|
||||
*/
|
||||
|
||||
type sentry struct {
|
||||
OrganizationSlug string // `json:"organization_slug"`
|
||||
ProjectSlug string // `json:"project_slug"`
|
||||
Token string // `json:"token"`
|
||||
ProjectSlug string // `json:"project_slug"`
|
||||
Token string // `json:"token"`
|
||||
}
|
||||
|
||||
type sentryEvent struct {
|
||||
Tags []struct {
|
||||
Key string
|
||||
Value string `json:"value"`
|
||||
Key string
|
||||
Value string `json:"value"`
|
||||
}
|
||||
DateCreated string `json:"dateCreated"` // or dateReceived ?
|
||||
Title string
|
||||
EventID string `json:"eventID"`
|
||||
DateCreated string `json:"dateCreated"` // or dateReceived ?
|
||||
Title string
|
||||
EventID string `json:"eventID"`
|
||||
}
|
||||
|
||||
|
||||
func (sn *sentry) Request(c *client) error {
|
||||
requestURL := fmt.Sprintf("https://sentry.io/api/0/projects/%v/%v/events/", sn.OrganizationSlug, sn.ProjectSlug)
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
|
|
@ -88,9 +85,9 @@ PageLoop:
|
|||
c.errChan <- fmt.Errorf("%v | Event: %v", err, e)
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(parsedTime))
|
||||
timestamp := uint64(parsedTime.UnixMilli())
|
||||
// TODO: not to receive all the messages (use default integration timestamp)
|
||||
if firstEvent { // TODO: reverse range?
|
||||
if firstEvent { // TODO: reverse range?
|
||||
c.setLastMessageId(timestamp, e.EventID)
|
||||
firstEvent = false
|
||||
}
|
||||
|
|
@ -117,12 +114,12 @@ PageLoop:
|
|||
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
SessionID: sessionID,
|
||||
Token: token,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "sentry",
|
||||
Source: "sentry",
|
||||
Timestamp: timestamp,
|
||||
Name: e.Title,
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.Title,
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -137,7 +134,7 @@ PageLoop:
return fmt.Errorf("Link header format error. Got: '%v'", linkHeader)
}

nextLinkInfo := pagInfo[ 1 ]
nextLinkInfo := pagInfo[1]
if strings.Contains(nextLinkInfo, `results="false"`) {
break
}
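Sentry's events API paginates through a Link response header; the loop above follows the cursor until the "next" entry advertises results="false". A rough sketch of that stopping rule, assuming the header is split on the comma between the previous and next entries; the header string below is fabricated, not captured from Sentry:

package main

import (
	"fmt"
	"strings"
)

// hasNextPage mimics the check in the hunk above: the second Link entry
// describes the "next" cursor and carries a results="true"/"false" flag.
func hasNextPage(linkHeader string) (bool, error) {
	pagInfo := strings.Split(linkHeader, ",")
	if len(pagInfo) < 2 {
		return false, fmt.Errorf("Link header format error. Got: '%v'", linkHeader)
	}
	nextLinkInfo := pagInfo[1]
	return !strings.Contains(nextLinkInfo, `results="false"`), nil
}

func main() {
	// Fabricated header shaped like Sentry-style cursor pagination.
	header := `<https://sentry.io/api/0/...>; rel="previous"; results="false", ` +
		`<https://sentry.io/api/0/...>; rel="next"; results="false"; cursor="0:100:0"`
	more, err := hasNextPage(header)
	fmt.Println(more, err) // false <nil>: the page loop would break here
}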
@ -151,4 +148,4 @@ PageLoop:
|
|||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,22 +1,19 @@
|
|||
package integration
|
||||
|
||||
|
||||
import (
|
||||
"google.golang.org/api/option"
|
||||
"cloud.google.com/go/logging/logadmin"
|
||||
"google.golang.org/api/iterator"
|
||||
|
||||
//"strconv"
|
||||
"encoding/json"
|
||||
"time"
|
||||
"fmt"
|
||||
"context"
|
||||
"google.golang.org/api/option"
|
||||
|
||||
//"strconv"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
// Old: asayerSessionId
|
||||
|
||||
const SD_FILTER_QUERY = `
|
||||
|
|
@ -28,7 +25,7 @@ const SD_FILTER_QUERY = `
|
|||
|
||||
type stackdriver struct {
|
||||
ServiceAccountCredentials string // `json:"service_account_credentials"`
|
||||
LogName string // `json:"log_name"`
|
||||
LogName string // `json:"log_name"`
|
||||
}
|
||||
|
||||
type saCreds struct {
|
||||
|
|
@ -37,10 +34,10 @@ type saCreds struct {
|
|||
|
||||
func (sd *stackdriver) Request(c *client) error {
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // Timestamp is RFC3339Nano, so we take the next millisecond
|
||||
fromFormatted := time.Unix(0, int64(fromTs *1e6)).Format(time.RFC3339Nano)
|
||||
fromFormatted := time.UnixMilli(int64(fromTs)).Format(time.RFC3339Nano)
|
||||
ctx := context.Background()
|
||||
|
||||
var parsedCreds saCreds
|
||||
var parsedCreds saCreds
|
||||
err := json.Unmarshal([]byte(sd.ServiceAccountCredentials), &parsedCreds)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -49,56 +46,56 @@ func (sd *stackdriver) Request(c *client) error {
|
|||
opt := option.WithCredentialsJSON([]byte(sd.ServiceAccountCredentials))
|
||||
client, err := logadmin.NewClient(ctx, parsedCreds.ProjectId, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
filter := fmt.Sprintf(SD_FILTER_QUERY, parsedCreds.ProjectId, sd.LogName, fromFormatted)
|
||||
// By default, Entries are listed from oldest to newest.
|
||||
/* ResourceNames(rns []string)
|
||||
"projects/[PROJECT_ID]"
|
||||
"organizations/[ORGANIZATION_ID]"
|
||||
"billingAccounts/[BILLING_ACCOUNT_ID]"
|
||||
"folders/[FOLDER_ID]"
|
||||
*/
|
||||
it := client.Entries(ctx, logadmin.Filter(filter))
|
||||
|
||||
// TODO: Pagination:
|
||||
//pager := iterator.NewPager(it, 1000, "")
|
||||
//nextToken, err := pager.NextPage(&entries)
|
||||
//if nextToken == "" { break }
|
||||
for {
|
||||
e, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filter := fmt.Sprintf(SD_FILTER_QUERY, parsedCreds.ProjectId, sd.LogName, fromFormatted)
|
||||
// By default, Entries are listed from oldest to newest.
|
||||
/* ResourceNames(rns []string)
|
||||
"projects/[PROJECT_ID]"
|
||||
"organizations/[ORGANIZATION_ID]"
|
||||
"billingAccounts/[BILLING_ACCOUNT_ID]"
|
||||
"folders/[FOLDER_ID]"
|
||||
*/
|
||||
it := client.Entries(ctx, logadmin.Filter(filter))
|
||||
|
||||
token := e.Labels["openReplaySessionToken"]
|
||||
// sessionID, err := strconv.ParseUint(strSessionID, 10, 64)
|
||||
// if err != nil {
|
||||
// c.errChan <- err
|
||||
// continue
|
||||
// }
|
||||
jsonEvent, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(e.Timestamp))
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
// TODO: Pagination:
|
||||
//pager := iterator.NewPager(it, 1000, "")
|
||||
//nextToken, err := pager.NextPage(&entries)
|
||||
//if nextToken == "" { break }
|
||||
for {
|
||||
e, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
token := e.Labels["openReplaySessionToken"]
|
||||
// sessionID, err := strconv.ParseUint(strSessionID, 10, 64)
|
||||
// if err != nil {
|
||||
// c.errChan <- err
|
||||
// continue
|
||||
// }
|
||||
jsonEvent, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(e.Timestamp.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
//SessionID: sessionID,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "stackdriver",
|
||||
Source: "stackdriver",
|
||||
Timestamp: timestamp,
|
||||
Name: e.InsertID, // not sure about that
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.InsertID, // not sure about that
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,20 +1,19 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
The maximum value for limit is 10,000 messages or 100 MB in total message size,
|
||||
/*
|
||||
The maximum value for limit is 10,000 messages or 100 MB in total message size,
|
||||
which means the query may return less than 10,000 messages if you exceed the size limit.
|
||||
|
||||
API Documentation: https://help.sumologic.com/APIs/Search-Job-API/About-the-Search-Job-API
|
||||
|
|
@ -22,31 +21,30 @@ import (
|
|||
const SL_LIMIT = 10000
|
||||
|
||||
type sumologic struct {
|
||||
AccessId string // `json:"access_id"`
|
||||
AccessKey string // `json:"access_key"`
|
||||
cookies []*http.Cookie
|
||||
AccessId string // `json:"access_id"`
|
||||
AccessKey string // `json:"access_key"`
|
||||
cookies []*http.Cookie
|
||||
}
|
||||
|
||||
|
||||
type sumplogicJobResponce struct {
|
||||
Id string
|
||||
}
|
||||
|
||||
type sumologicJobStatusResponce struct {
|
||||
State string
|
||||
State string
|
||||
MessageCount int
|
||||
//PendingErrors []string
|
||||
}
|
||||
|
||||
type sumologicResponce struct {
|
||||
Messages [] struct {
|
||||
Messages []struct {
|
||||
Map json.RawMessage
|
||||
}
|
||||
}
|
||||
|
||||
type sumologicEvent struct {
|
||||
Timestamp uint64 `json:"_messagetime,string"`
|
||||
Raw string `json:"_raw"`
|
||||
Raw string `json:"_raw"`
|
||||
}
|
||||
|
||||
func (sl *sumologic) deleteJob(jobId string, errChan chan<- error) {
|
||||
|
|
@ -68,10 +66,9 @@ func (sl *sumologic) deleteJob(jobId string, errChan chan<- error) {
|
|||
resp.Body.Close()
|
||||
}
|
||||
|
||||
|
||||
func (sl *sumologic) Request(c *client) error {
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond
|
||||
toTs := utime.CurrentTimestamp()
|
||||
toTs := time.Now().UnixMilli()
|
||||
requestURL := fmt.Sprintf("https://api.%vsumologic.com/api/v1/search/jobs", "eu.") // deployment server??
|
||||
jsonBody := fmt.Sprintf(`{
|
||||
"query": "\"openReplaySessionToken=\" AND (*error* OR *fail* OR *exception*)",
|
||||
|
|
@ -132,7 +129,7 @@ func (sl *sumologic) Request(c *client) error {

tick := time.Tick(5 * time.Second)
for {
<- tick
<-tick
resp, err = http.DefaultClient.Do(req)
if err != nil {
return err // TODO: retry, counter/timeout
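The job-status polling above uses time.Tick: block on the channel every five seconds, re-issue the status request, and break out depending on the returned state. A self-contained sketch of the same polling shape, with a stubbed status function standing in for the Sumo Logic search-job API and a shortened interval:

package main

import (
	"fmt"
	"time"
)

func main() {
	attempts := 0
	// Stub standing in for the search-job status endpoint.
	jobState := func() string {
		attempts++
		if attempts < 3 {
			return "GATHERING RESULTS"
		}
		return "DONE GATHERING RESULTS"
	}

	tick := time.Tick(200 * time.Millisecond) // 5 * time.Second in the real client
	for {
		<-tick
		state := jobState()
		fmt.Println("state:", state)
		if state == "DONE GATHERING RESULTS" {
			break // results are ready to page through
		}
		if state != "NOT STARTED" && state != "GATHERING RESULTS" {
			break // unexpected state, treated as an error in the real client
		}
	}
}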
@ -147,12 +144,12 @@ func (sl *sumologic) Request(c *client) error {
|
|||
}
|
||||
if jobStatus.State == "DONE GATHERING RESULTS" {
|
||||
offset := 0
|
||||
for ;offset < jobStatus.MessageCount; {
|
||||
for offset < jobStatus.MessageCount {
|
||||
requestURL = fmt.Sprintf(
|
||||
"https://api.%vsumologic.com/api/v1/search/jobs/%v/messages?offset=%v&limit=%v",
|
||||
"eu.",
|
||||
jobResponce.Id,
|
||||
offset,
|
||||
"https://api.%vsumologic.com/api/v1/search/jobs/%v/messages?offset=%v&limit=%v",
|
||||
"eu.",
|
||||
jobResponce.Id,
|
||||
offset,
|
||||
SL_LIMIT,
|
||||
)
|
||||
req, err = http.NewRequest("GET", requestURL, nil)
|
||||
|
|
@ -190,17 +187,17 @@ func (sl *sumologic) Request(c *client) error {
|
|||
}
|
||||
name := e.Raw
|
||||
if len(name) > 20 {
|
||||
name = name[:20] // not sure about that
|
||||
name = name[:20] // not sure about that
|
||||
}
|
||||
c.setLastMessageTimestamp(e.Timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
//SessionID: sessionID,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "sumologic",
|
||||
Source: "sumologic",
|
||||
Timestamp: e.Timestamp,
|
||||
Name: name,
|
||||
Payload: string(m.Map), //e.Raw ?
|
||||
Name: name,
|
||||
Payload: string(m.Map), //e.Raw ?
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -209,11 +206,11 @@ func (sl *sumologic) Request(c *client) error {
|
|||
}
|
||||
break
|
||||
}
|
||||
if jobStatus.State != "NOT STARTED" &&
|
||||
if jobStatus.State != "NOT STARTED" &&
|
||||
jobStatus.State != "GATHERING RESULTS" {
|
||||
// error
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
|
|
@ -10,67 +10,64 @@ import (
|
|||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
FS_DIR := env.String("FS_DIR");
|
||||
FS_DIR := env.String("FS_DIR")
|
||||
if _, err := os.Stat(FS_DIR); os.IsNotExist(err) {
|
||||
log.Fatalf("%v doesn't exist. %v", FS_DIR, err)
|
||||
}
|
||||
|
||||
writer := NewWriter(env.Uint16("FS_ULIMIT"), FS_DIR)
|
||||
|
||||
count := 0
|
||||
count := 0
|
||||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
env.String("GROUP_SINK"),
|
||||
[]string{
|
||||
[]string{
|
||||
env.String("TOPIC_RAW_WEB"),
|
||||
env.String("TOPIC_RAW_IOS"),
|
||||
},
|
||||
func(sessionID uint64, message Message, _ *types.Meta) {
|
||||
//typeID, err := GetMessageTypeID(value)
|
||||
// if err != nil {
|
||||
// log.Printf("Message type decoding error: %v", err)
|
||||
// return
|
||||
// }
|
||||
typeID := message.Meta().TypeID
|
||||
if !IsReplayerType(typeID) {
|
||||
return
|
||||
}
|
||||
},
|
||||
func(sessionID uint64, message Message, _ *types.Meta) {
|
||||
//typeID, err := GetMessageTypeID(value)
|
||||
// if err != nil {
|
||||
// log.Printf("Message type decoding error: %v", err)
|
||||
// return
|
||||
// }
|
||||
typeID := message.Meta().TypeID
|
||||
if !IsReplayerType(typeID) {
|
||||
return
|
||||
}
|
||||
|
||||
count++
|
||||
count++
|
||||
|
||||
value := message.Encode()
|
||||
var data []byte
|
||||
if IsIOSType(typeID) {
|
||||
data = value
|
||||
} else {
|
||||
value := message.Encode()
|
||||
var data []byte
|
||||
if IsIOSType(typeID) {
|
||||
data = value
|
||||
} else {
|
||||
data = make([]byte, len(value)+8)
|
||||
copy(data[8:], value[:])
|
||||
binary.LittleEndian.PutUint64(data[0:], message.Meta().Index)
|
||||
}
|
||||
if err := writer.Write(sessionID, data); err != nil {
|
||||
}
|
||||
if err := writer.Write(sessionID, data); err != nil {
|
||||
log.Printf("Writer error: %v\n", err)
|
||||
}
|
||||
},
|
||||
},
|
||||
false,
|
||||
)
|
||||
consumer.DisableAutoCommit()
|
||||
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
tick := time.Tick(30 * time.Second)
|
||||
tick := time.Tick(30 * time.Second)
|
||||
|
||||
log.Printf("Sink service started\n")
|
||||
log.Printf("Sink service started\n")
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
|
|
@ -85,7 +82,7 @@ func main() {
|
|||
|
||||
log.Printf("%v messages during 30 sec", count)
|
||||
count = 0
|
||||
|
||||
|
||||
consumer.Commit()
|
||||
default:
|
||||
err := consumer.ConsumeNext()
|
||||
|
|
@ -96,4 +93,3 @@ func main() {
|
|||
}
|
||||
|
||||
}
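For web messages, the sink's consumer callback above prefixes each encoded payload with its 8-byte message index, little-endian, before writing it to the session file (iOS payloads are written as-is). A small sketch of that framing and of reading it back; the payload bytes are arbitrary:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the message index as 8 little-endian bytes,
// mirroring the web branch in the consumer callback above.
func frame(index uint64, value []byte) []byte {
	data := make([]byte, len(value)+8)
	copy(data[8:], value)
	binary.LittleEndian.PutUint64(data[0:], index)
	return data
}

func main() {
	payload := []byte{0xde, 0xad, 0xbe, 0xef} // arbitrary encoded message
	framed := frame(42, payload)

	index := binary.LittleEndian.Uint64(framed[:8])
	fmt.Println(index, framed[8:]) // 42 [222 173 190 239]
}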
|
||||
|
||||
|
|
|
|||
|
|
@ -1,23 +1,23 @@
package main

import (
"os"
"log"
"time"
"strconv"
"io/ioutil"
"log"
"os"
"strconv"
"time"

"openreplay/backend/pkg/flakeid"
)

const DELETE_TIMEOUT = 12 * time.Hour;
const DELETE_TIMEOUT = 48 * time.Hour

func cleanDir(dirname string) {
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Printf("Cannot read file directory. %v", err)
return
}
files, err := ioutil.ReadDir(dirname)
if err != nil {
log.Printf("Cannot read file directory. %v", err)
return
}

for _, f := range files {
name := f.Name()
@ -27,8 +27,9 @@ func cleanDir(dirname string) {
continue
}
ts := int64(flakeid.ExtractTimestamp(id))
if time.Unix(ts/1000, 0).Add(DELETE_TIMEOUT).Before(time.Now()) {
if time.UnixMilli(ts).Add(DELETE_TIMEOUT).Before(time.Now()) {
// returns a error. Don't log it sinse it can be race condition between worker instances
os.Remove(dirname + "/" + name)
}
}
}
}
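cleanDir derives a session's age from its flake ID: the ID's embedded millisecond timestamp plus DELETE_TIMEOUT (now 48 hours instead of 12) decides whether the file is removed. A sketch of just the age test, with hard-coded timestamps standing in for flakeid.ExtractTimestamp:

package main

import (
	"fmt"
	"time"
)

const deleteTimeout = 48 * time.Hour // matches the new DELETE_TIMEOUT above

// expired reports whether a session whose flake ID carries the given
// millisecond timestamp is past the deletion window.
func expired(tsMillis int64, now time.Time) bool {
	return time.UnixMilli(tsMillis).Add(deleteTimeout).Before(now)
}

func main() {
	now := time.Now()
	fresh := now.Add(-1 * time.Hour).UnixMilli()  // 1 hour old: keep
	stale := now.Add(-72 * time.Hour).UnixMilli() // 3 days old: delete
	fmt.Println(expired(fresh, now), expired(stale, now)) // false true
}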
|
||||
|
|
|
|||
|
|
@ -2,45 +2,41 @@ package main
|
|||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/storage"
|
||||
)
|
||||
|
||||
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
|
||||
storageWeb := storage.NewS3(env.String("AWS_REGION_WEB"), env.String("S3_BUCKET_WEB"))
|
||||
//storageIos := storage.NewS3(env.String("AWS_REGION_IOS"), env.String("S3_BUCKET_IOS"))
|
||||
storage := storage.NewS3(env.String("AWS_REGION_WEB"), env.String("S3_BUCKET_WEB"))
|
||||
FS_DIR := env.String("FS_DIR")
|
||||
FS_CLEAN_HRS := env.Int("FS_CLEAN_HRS")
|
||||
|
||||
var uploadKey func(string, int, *storage.S3)
|
||||
uploadKey = func(key string, retryCount int, s *storage.S3) {
|
||||
var uploadKey func(string, int)
|
||||
uploadKey = func(key string, retryCount int) {
|
||||
if retryCount <= 0 {
|
||||
return;
|
||||
return
|
||||
}
|
||||
file, err := os.Open(FS_DIR + "/" + key)
|
||||
defer file.Close()
|
||||
if err != nil {
|
||||
log.Printf("File error: %v; Will retry %v more time(s)\n", err, retryCount)
|
||||
time.AfterFunc(2*time.Minute, func() {
|
||||
uploadKey(key, retryCount - 1, s)
|
||||
uploadKey(key, retryCount-1)
|
||||
})
|
||||
} else {
|
||||
if err := s.Upload(gzipFile(file), key, "application/octet-stream", true); err != nil {
|
||||
if err := storage.Upload(gzipFile(file), key, "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage upload error: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
|
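uploadKey above retries itself through time.AfterFunc: on a file error it reschedules the same key two minutes later with the retry counter decremented, and gives up once the counter hits zero. A compact sketch of that self-rescheduling closure; the work function, the shortened delay, and the WaitGroup are stand-ins added so the sketch runs on its own:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var wg sync.WaitGroup
	wg.Add(1)

	attempts := 0
	work := func() error { // stand-in for opening and uploading the file
		attempts++
		if attempts < 3 {
			return fmt.Errorf("transient failure #%d", attempts)
		}
		return nil
	}

	var upload func(key string, retryCount int)
	upload = func(key string, retryCount int) {
		if retryCount <= 0 {
			wg.Done() // out of retries, give up
			return
		}
		if err := work(); err != nil {
			fmt.Printf("key %s: %v; will retry %d more time(s)\n", key, err, retryCount)
			time.AfterFunc(100*time.Millisecond, func() { // 2 minutes in the real service
				upload(key, retryCount-1)
			})
			return
		}
		fmt.Printf("key %s uploaded after %d attempt(s)\n", key, attempts)
		wg.Done()
	}

	upload("12345", 5)
	wg.Wait()
}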
@ -48,27 +44,24 @@ func main() {
|
|||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
env.String("GROUP_STORAGE"),
|
||||
[]string{
|
||||
[]string{
|
||||
env.String("TOPIC_TRIGGER"),
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
switch msg.(type) {
|
||||
case *messages.SessionEnd:
|
||||
uploadKey(strconv.FormatUint(sessionID, 10), 5, storageWeb)
|
||||
//case *messages.IOSSessionEnd:
|
||||
// uploadKey(strconv.FormatUint(sessionID, 10), 5, storageIos)
|
||||
}
|
||||
},
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
switch msg.(type) {
|
||||
case *messages.SessionEnd:
|
||||
uploadKey(strconv.FormatUint(sessionID, 10), 5)
|
||||
}
|
||||
},
|
||||
true,
|
||||
)
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
cleanTick := time.Tick(time.Duration(FS_CLEAN_HRS) * time.Hour)
|
||||
|
||||
cleanTick := time.Tick(time.Duration(FS_CLEAN_HRS) * time.Hour)
|
||||
|
||||
|
||||
log.Printf("Storage service started\n")
|
||||
log.Printf("Storage service started\n")
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
|
|
@ -76,7 +69,7 @@ func main() {
|
|||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-cleanTick:
|
||||
cleanDir(FS_DIR)
|
||||
go cleanDir(FS_DIR)
|
||||
default:
|
||||
err := consumer.ConsumeNext()
|
||||
if err != nil {
|
||||
|
|
@ -85,4 +78,3 @@ func main() {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -46,11 +46,13 @@ pg_port=5432
|
|||
pg_user=postgres
|
||||
pg_timeout=30
|
||||
pg_minconn=45
|
||||
PG_RETRY_MAX=50
|
||||
PG_RETRY_INTERVAL=2
|
||||
put_S3_TTL=20
|
||||
sentryURL=
|
||||
sessions_bucket=mobs
|
||||
sessions_region=us-east-1
|
||||
sourcemaps_bucket=sourcemaps
|
||||
sourcemaps_reader=http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps
|
||||
sourcemaps_reader=http://127.0.0.1:9000/
|
||||
stage=default-ee
|
||||
version_number=1.0.0
|
||||
|
|
|
|||
12 ee/api/.gitignore vendored
|
|
@ -180,9 +180,6 @@ Pipfile
|
|||
/chalicelib/core/alerts.py
|
||||
/chalicelib/core/alerts_processor.py
|
||||
/chalicelib/core/announcements.py
|
||||
/chalicelib/blueprints/bp_app_api.py
|
||||
/chalicelib/blueprints/bp_core.py
|
||||
/chalicelib/blueprints/bp_core_crons.py
|
||||
/chalicelib/core/collaboration_slack.py
|
||||
/chalicelib/core/errors_favorite_viewed.py
|
||||
/chalicelib/core/events.py
|
||||
|
|
@ -237,7 +234,6 @@ Pipfile
|
|||
/chalicelib/utils/smtp.py
|
||||
/chalicelib/utils/strings.py
|
||||
/chalicelib/utils/TimeUTC.py
|
||||
/chalicelib/blueprints/app/__init__.py
|
||||
/routers/app/__init__.py
|
||||
/routers/crons/__init__.py
|
||||
/routers/subs/__init__.py
|
||||
|
|
@ -245,8 +241,8 @@ Pipfile
|
|||
/chalicelib/core/assist.py
|
||||
/auth/auth_apikey.py
|
||||
/auth/auth_jwt.py
|
||||
/chalicelib/blueprints/subs/bp_insights.py
|
||||
/build.sh
|
||||
/routers/base.py
|
||||
/routers/core.py
|
||||
/routers/crons/core_crons.py
|
||||
/routers/subs/dashboard.py
|
||||
|
|
@ -257,10 +253,12 @@ Pipfile
|
|||
/chalicelib/core/heatmaps.py
|
||||
/routers/subs/insights.py
|
||||
/schemas.py
|
||||
/chalicelib/blueprints/app/v1_api.py
|
||||
/routers/app/v1_api.py
|
||||
/chalicelib/core/custom_metrics.py
|
||||
/chalicelib/core/performance_event.py
|
||||
/chalicelib/core/saved_search.py
|
||||
/app_alerts.py
|
||||
/build_alerts.sh
|
||||
/routers/subs/metrics.py
|
||||
/routers/subs/v1_api.py
|
||||
/chalicelib/core/dashboards2.py
|
||||
entrypoint.sh
|
||||
|
|
@ -6,6 +6,15 @@ WORKDIR /work
|
|||
COPY . .
|
||||
RUN pip install -r requirements.txt
|
||||
RUN mv .env.default .env
|
||||
ENV APP_NAME chalice
|
||||
# Installing Nodejs
|
||||
RUN apt update && apt install -y curl && \
|
||||
curl -fsSL https://deb.nodesource.com/setup_12.x | bash - && \
|
||||
apt install -y nodejs && \
|
||||
apt remove --purge -y curl && \
|
||||
rm -rf /var/lib/apt/lists/* && \
|
||||
cd sourcemap-reader && \
|
||||
npm install
|
||||
|
||||
# Add Tini
|
||||
# Startup daemon
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ COPY . .
|
|||
RUN pip install -r requirements.txt
|
||||
RUN mv .env.default .env && mv app_alerts.py app.py
|
||||
ENV pg_minconn 2
|
||||
ENV APP_NAME alerts
|
||||
|
||||
# Add Tini
|
||||
# Startup daemon
|
||||
|
|
|
|||
|
|
@ -1,10 +0,0 @@
sudo yum update
sudo yum install yum-utils
sudo rpm --import https://repo.clickhouse.com/CLICKHOUSE-KEY.GPG
sudo yum-config-manager --add-repo https://repo.clickhouse.com/rpm/stable/x86_64
sudo yum update
sudo service clickhouse-server restart


#later mus use in clickhouse-client:
#SET allow_experimental_window_functions = 1;
|
||||
|
|
@ -11,10 +11,10 @@ from starlette.responses import StreamingResponse, JSONResponse
|
|||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from routers import core, core_dynamic, ee, saml
|
||||
from routers.app import v1_api, v1_api_ee
|
||||
from routers.subs import v1_api
|
||||
from routers.crons import core_crons
|
||||
from routers.crons import core_dynamic_crons
|
||||
from routers.subs import dashboard
|
||||
from routers.subs import dashboard, insights, v1_api_ee
|
||||
|
||||
app = FastAPI()
|
||||
|
||||
|
|
@ -65,7 +65,7 @@ app.include_router(saml.public_app)
|
|||
app.include_router(saml.app)
|
||||
app.include_router(saml.app_apikey)
|
||||
app.include_router(dashboard.app)
|
||||
# app.include_router(insights.app)
|
||||
app.include_router(insights.app)
|
||||
app.include_router(v1_api.app_apikey)
|
||||
app.include_router(v1_api_ee.app_apikey)
|
||||
|
||||
|
|
|
|||
|
|
@ -6,41 +6,40 @@ from chalicelib.core import projects
|
|||
|
||||
|
||||
def get_state(tenant_id):
|
||||
my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
|
||||
pids = [s["projectId"] for s in my_projects]
|
||||
pids = projects.get_projects_ids(tenant_id=tenant_id)
|
||||
with pg_client.PostgresClient() as cur:
|
||||
recorded = False
|
||||
meta = False
|
||||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
SELECT
|
||||
COUNT(*)
|
||||
FROM public.sessions AS s
|
||||
where s.project_id IN %(ids)s
|
||||
LIMIT 1;""",
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)})
|
||||
)
|
||||
recorded = cur.fetchone()["count"] > 0
|
||||
recorded = cur.fetchone()["exists"]
|
||||
meta = False
|
||||
if recorded:
|
||||
cur.execute(
|
||||
cur.mogrify("""SELECT SUM((SELECT COUNT(t.meta)
|
||||
FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
|
||||
(p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
|
||||
(sessions.user_id)) AS t(meta)
|
||||
WHERE t.meta NOTNULL))
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 'defined'
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON(TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s
|
||||
AND p.deleted_at ISNULL;"""
|
||||
cur.mogrify("""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id = p.project_id
|
||||
AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s AND p.deleted_at ISNULL
|
||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;"""
|
||||
, {"tenant_id": tenant_id}))
|
||||
|
||||
meta = cur.fetchone()["sum"] > 0
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return [
|
||||
{"task": "Install OpenReplay",
|
||||
|
|
@ -61,22 +60,18 @@ def get_state(tenant_id):
|
|||
|
||||
|
||||
def get_state_installing(tenant_id):
|
||||
my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
|
||||
pids = [s["projectId"] for s in my_projects]
|
||||
pids = projects.get_projects_ids(tenant_id=tenant_id)
|
||||
with pg_client.PostgresClient() as cur:
|
||||
recorded = False
|
||||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
SELECT
|
||||
COUNT(*)
|
||||
FROM public.sessions AS s
|
||||
where s.project_id IN %(ids)s
|
||||
LIMIT 1;""",
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)})
|
||||
)
|
||||
recorded = cur.fetchone()["count"] > 0
|
||||
recorded = cur.fetchone()["exists"]
|
||||
|
||||
return {"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
|
|
@ -86,21 +81,24 @@ def get_state_installing(tenant_id):
|
|||
def get_state_identify_users(tenant_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""SELECT SUM((SELECT COUNT(t.meta)
|
||||
FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
|
||||
(p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
|
||||
(sessions.user_id)) AS t(meta)
|
||||
WHERE t.meta NOTNULL))
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 'defined'
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON(TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s
|
||||
AND p.deleted_at ISNULL;"""
|
||||
cur.mogrify("""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id = p.project_id
|
||||
AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s AND p.deleted_at ISNULL
|
||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;"""
|
||||
, {"tenant_id": tenant_id}))
|
||||
|
||||
meta = cur.fetchone()["sum"] > 0
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return {"task": "Identify Users",
|
||||
"done": meta,
|
||||
|
|
|
|||
File diff suppressed because it is too large
|
|
@ -1,9 +1,9 @@
|
|||
from chalicelib.core import sessions_metas
|
||||
from chalicelib.utils import helper, dev
|
||||
from chalicelib.utils import ch_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.core.dashboard import __get_constraint_values, __complete_missing_steps
|
||||
import schemas
|
||||
from chalicelib.core.dashboard import __get_basic_constraints, __get_meta_constraint
|
||||
from chalicelib.core.dashboard import __get_constraint_values, __complete_missing_steps
|
||||
from chalicelib.utils import ch_client
|
||||
from chalicelib.utils import helper, dev
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
||||
def __transform_journey(rows):
|
||||
|
|
@ -42,7 +42,7 @@ def journey(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=
|
|||
elif f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
|
||||
event_table = JOURNEY_TYPES[f["value"]]["table"]
|
||||
event_column = JOURNEY_TYPES[f["value"]]["column"]
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append(f"sessions_metadata.project_id = %(project_id)s")
|
||||
meta_condition.append(f"sessions_metadata.datetime >= toDateTime(%(startTimestamp)s / 1000)")
|
||||
|
|
@ -303,7 +303,7 @@ def feature_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), en
|
|||
elif f["type"] == "EVENT_VALUE":
|
||||
event_value = f["value"]
|
||||
default = False
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("sessions_metadata.user_id IS NOT NULL")
|
||||
meta_condition.append("not empty(sessions_metadata.user_id)")
|
||||
|
|
@ -404,7 +404,7 @@ def feature_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70),
|
|||
elif f["type"] == "EVENT_VALUE":
|
||||
event_value = f["value"]
|
||||
default = False
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("sessions_metadata.user_id IS NOT NULL")
|
||||
meta_condition.append("not empty(sessions_metadata.user_id)")
|
||||
|
|
@ -512,7 +512,7 @@ def feature_popularity_frequency(project_id, startTimestamp=TimeUTC.now(delta_da
|
|||
if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
|
||||
event_table = JOURNEY_TYPES[f["value"]]["table"]
|
||||
event_column = JOURNEY_TYPES[f["value"]]["column"]
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("sessions_metadata.user_id IS NOT NULL")
|
||||
meta_condition.append("not empty(sessions_metadata.user_id)")
|
||||
|
|
@ -586,7 +586,7 @@ def feature_adoption(project_id, startTimestamp=TimeUTC.now(delta_days=-70), end
|
|||
elif f["type"] == "EVENT_VALUE":
|
||||
event_value = f["value"]
|
||||
default = False
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("sessions_metadata.user_id IS NOT NULL")
|
||||
meta_condition.append("not empty(sessions_metadata.user_id)")
|
||||
|
|
@ -672,7 +672,7 @@ def feature_adoption_top_users(project_id, startTimestamp=TimeUTC.now(delta_days
|
|||
elif f["type"] == "EVENT_VALUE":
|
||||
event_value = f["value"]
|
||||
default = False
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("user_id IS NOT NULL")
|
||||
meta_condition.append("not empty(sessions_metadata.user_id)")
|
||||
|
|
@ -742,7 +742,7 @@ def feature_adoption_daily_usage(project_id, startTimestamp=TimeUTC.now(delta_da
|
|||
elif f["type"] == "EVENT_VALUE":
|
||||
event_value = f["value"]
|
||||
default = False
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("sessions_metadata.project_id = %(project_id)s")
|
||||
meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)")
|
||||
|
|
@ -807,7 +807,7 @@ def feature_intensity(project_id, startTimestamp=TimeUTC.now(delta_days=-70), en
|
|||
if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
|
||||
event_table = JOURNEY_TYPES[f["value"]]["table"]
|
||||
event_column = JOURNEY_TYPES[f["value"]]["column"]
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("sessions_metadata.project_id = %(project_id)s")
|
||||
meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)")
|
||||
|
|
@ -847,7 +847,7 @@ def users_active(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTime
|
|||
for f in filters:
|
||||
if f["type"] == "PERIOD" and f["value"] in ["DAY", "WEEK"]:
|
||||
period = f["value"]
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
extra_values["user_id"] = f["value"]
|
||||
period_function = PERIOD_TO_FUNCTION[period]
|
||||
|
|
@ -940,7 +940,7 @@ def users_slipping(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTi
|
|||
elif f["type"] == "EVENT_VALUE":
|
||||
event_value = f["value"]
|
||||
default = False
|
||||
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
|
||||
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
|
||||
meta_condition.append(f"sessions_metadata.user_id = %(user_id)s")
|
||||
meta_condition.append("sessions_metadata.project_id = %(project_id)s")
|
||||
meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)")
|
||||
|
|
@ -1044,4 +1044,4 @@ def search(text, feature_type, project_id, platform=None):
|
|||
rows = ch.execute(ch_query, params)
|
||||
else:
|
||||
return []
|
||||
return [helper.dict_to_camel_case(row) for row in rows]
|
||||
return [helper.dict_to_camel_case(row) for row in rows]
|
||||
|
|
|
|||
|
|
@ -82,22 +82,22 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
|
|||
rows = cur.fetchall()
|
||||
if recording_state:
|
||||
project_ids = [f'({r["project_id"]})' for r in rows]
|
||||
query = f"""SELECT projects.project_id, COALESCE(MAX(start_ts), 0) AS last
|
||||
FROM (VALUES {",".join(project_ids)}) AS projects(project_id)
|
||||
LEFT JOIN sessions USING (project_id)
|
||||
GROUP BY project_id;"""
|
||||
cur.execute(
|
||||
query=query
|
||||
)
|
||||
query = cur.mogrify(f"""SELECT projects.project_id, COALESCE(MAX(start_ts), 0) AS last
|
||||
FROM (VALUES {",".join(project_ids)}) AS projects(project_id)
|
||||
LEFT JOIN sessions USING (project_id)
|
||||
WHERE sessions.start_ts >= %(startDate)s AND sessions.start_ts <= %(endDate)s
|
||||
GROUP BY project_id;""",
|
||||
{"startDate": TimeUTC.now(delta_days=-3), "endDate": TimeUTC.now(delta_days=1)})
|
||||
|
||||
cur.execute(query=query)
|
||||
status = cur.fetchall()
|
||||
for r in rows:
|
||||
r["status"] = "red"
|
||||
for s in status:
|
||||
if s["project_id"] == r["project_id"]:
|
||||
if s["last"] < TimeUTC.now(-2):
|
||||
r["status"] = "red"
|
||||
elif s["last"] < TimeUTC.now(-1):
|
||||
if TimeUTC.now(-2) <= s["last"] < TimeUTC.now(-1):
|
||||
r["status"] = "yellow"
|
||||
else:
|
||||
elif s["last"] >= TimeUTC.now(-1):
|
||||
r["status"] = "green"
|
||||
break
|
||||
|
||||
|
|
@ -257,7 +257,8 @@ def get_project_key(project_id):
|
|||
where project_id =%(project_id)s AND deleted_at ISNULL;""",
|
||||
{"project_id": project_id})
|
||||
)
|
||||
return cur.fetchone()["project_key"]
|
||||
project = cur.fetchone()
|
||||
return project["project_key"] if project is not None else None
|
||||
|
||||
|
||||
def get_capture_status(project_id):
|
||||
|
|
@ -324,7 +325,7 @@ def is_authorized_batch(project_ids, tenant_id):
|
|||
query = cur.mogrify("""\
|
||||
SELECT project_id
|
||||
FROM public.projects
|
||||
where tenant_id =%(tenant_id)s
|
||||
WHERE tenant_id =%(tenant_id)s
|
||||
AND project_id IN %(project_ids)s
|
||||
AND deleted_at IS NULL;""",
|
||||
{"tenant_id": tenant_id, "project_ids": tuple(project_ids)})
|
||||
|
|
@ -334,3 +335,13 @@ def is_authorized_batch(project_ids, tenant_id):
|
|||
)
|
||||
rows = cur.fetchall()
|
||||
return [r["project_id"] for r in rows]
|
||||
|
||||
|
||||
def get_projects_ids(tenant_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify("""SELECT s.project_id
|
||||
FROM public.projects AS s
|
||||
WHERE tenant_id =%(tenant_id)s AND s.deleted_at IS NULL
|
||||
ORDER BY s.project_id;""", {"tenant_id": tenant_id}))
|
||||
rows = cur.fetchall()
|
||||
return [r["project_id"] for r in rows]
|
||||
|
|
|
|||
|
|
@ -3,14 +3,14 @@ from chalicelib.utils import ch_client
|
|||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
||||
def get_by_session_id(session_id):
|
||||
def get_by_session_id(session_id, project_id):
|
||||
with ch_client.ClickHouseClient() as ch:
|
||||
ch_query = """\
|
||||
SELECT
|
||||
datetime,url,type,duration,ttfb,header_size,encoded_body_size,decoded_body_size,success,coalesce(status,if(success, 200, status)) AS status
|
||||
FROM resources
|
||||
WHERE session_id = toUInt64(%(session_id)s);"""
|
||||
params = {"session_id": session_id}
|
||||
WHERE session_id = toUInt64(%(session_id)s) AND project_id=%(project_id)s;"""
|
||||
params = {"session_id": session_id, "project_id": project_id}
|
||||
rows = ch.execute(query=ch_query, params=params)
|
||||
results = []
|
||||
for r in rows:
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ def compute():
|
|||
RETURNING *,(SELECT email FROM users_ee WHERE role = 'owner' AND users_ee.tenant_id = tenants.tenant_id LIMIT 1);"""
|
||||
)
|
||||
data = cur.fetchall()
|
||||
requests.post('https://parrot.asayer.io/os/telemetry',
|
||||
requests.post('https://api.openreplay.com/os/telemetry',
|
||||
json={"stats": [process_data(d, edition='ee') for d in data]})
|
||||
|
||||
|
||||
|
|
@ -65,4 +65,4 @@ def new_client(tenant_id):
|
|||
FROM public.tenants
|
||||
WHERE tenant_id=%(tenant_id)s;""", {"tenant_id": tenant_id}))
|
||||
data = cur.fetchone()
|
||||
requests.post('https://parrot.asayer.io/os/signup', json=process_data(data, edition='ee'))
|
||||
requests.post('https://api.openreplay.com/os/signup', json=process_data(data, edition='ee'))
|
||||
|
|
@ -632,7 +632,6 @@ def change_jwt_iat(user_id):
|
|||
return cur.fetchone().get("jwt_iat")
|
||||
|
||||
|
||||
@dev.timed
|
||||
def authenticate(email, password, for_change_password=False, for_plugin=False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
|
|
|
|||
|
|
@ -25,5 +25,8 @@ class ClickHouseClient:
|
|||
def client(self):
|
||||
return self.__client
|
||||
|
||||
def format(self, query, params):
|
||||
return self.__client.substitute_params(query, params)
|
||||
|
||||
def __exit__(self, *args):
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -1,2 +0,0 @@
#!/bin/bash
uvicorn app:app --host 0.0.0.0 --reload

@ -4,11 +4,11 @@ boto3==1.16.1
pyjwt==1.7.1
psycopg2-binary==2.8.6
elasticsearch==7.9.1
jira==2.0.0
jira==3.1.1
clickhouse-driver==0.2.2
python3-saml==1.12.0

fastapi==0.74.1
fastapi==0.75.0
python-multipart==0.0.5
uvicorn[standard]==0.17.5
python-decouple==3.6
|
||||
|
|
|
|||
|
|
@ -1,14 +0,0 @@
from fastapi import APIRouter, Depends

from auth.auth_apikey import APIKeyAuth
from auth.auth_jwt import JWTAuth
from auth.auth_project import ProjectAuthorizer
from or_dependencies import ORRoute


def get_routers() -> (APIRouter, APIRouter, APIRouter):
    public_app = APIRouter(route_class=ORRoute)
    app = APIRouter(dependencies=[Depends(JWTAuth()), Depends(ProjectAuthorizer("projectId"))], route_class=ORRoute)
    app_apikey = APIRouter(dependencies=[Depends(APIKeyAuth()), Depends(ProjectAuthorizer("projectKey"))],
                           route_class=ORRoute)
    return public_app, app, app_apikey
|
||||
Some files were not shown because too many files have changed in this diff.