Compare commits
33 commits: main...snyk-fix-c
| Author | SHA1 | Date |
|---|---|---|
| | 7d36bda20b | |
| | 790e1001b7 | |
| | b707725906 | |
| | f7198e391d | |
| | c8fb77ad27 | |
| | 735af9a008 | |
| | 66637147c6 | |
| | 19d794225d | |
| | aa52434780 | |
| | f52d5f021e | |
| | 7e672e2315 | |
| | 1fb852590c | |
| | b5375df6e1 | |
| | 495038f5bd | |
| | ec4d1ec9a5 | |
| | 77281ebd3e | |
| | aeea4e50aa | |
| | 495927a717 | |
| | 7262fd2220 | |
| | 0905726474 | |
| | 3ae4983154 | |
| | ece2631c60 | |
| | 48954352fe | |
| | d3c18f9af6 | |
| | bd391ca935 | |
| | 362133f110 | |
| | dcf6d24abd | |
| | b2ac6ba0f8 | |
| | 34729e87ff | |
| | 74950dbe72 | |
| | 82943ab19b | |
| | be1ae8e89e | |
| | d17a32af30 | |
25 changed files with 496 additions and 443 deletions
@@ -20,7 +20,7 @@ def get_canvas_presigned_urls(session_id, project_id):
"projectId": project_id,
"recordingId": rows[i]["recording_id"]
}
key = config("CANVAS_PATTERN", default="%(sessionId)/%(recordingId)s.mp4") % params
key = config("CANVAS_PATTERN", default="%(sessionId)s/%(recordingId)s.mp4") % params
rows[i] = StorageClient.get_presigned_url_for_sharing(
bucket=config("CANVAS_BUCKET", default=config("sessions_bucket")),
expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
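The only functional change in the hunk above is the missing `s` conversion on the `sessionId` placeholder: with printf-style `%`-formatting, a mapping key without a trailing conversion character makes the next character be read as the conversion type, which raises a ValueError. A minimal sketch with made-up values:

```python
# Minimal illustration of the placeholder fix (values are made up):
params = {"sessionId": "abc123", "recordingId": "rec42"}

# "%(sessionId)" has no conversion type, so '/' is parsed as the conversion
# character and %-formatting raises ValueError.
try:
    "%(sessionId)/%(recordingId)s.mp4" % params
except ValueError as e:
    print("broken pattern:", e)

# With the trailing "s", both placeholders are valid:
print("%(sessionId)s/%(recordingId)s.mp4" % params)  # abc123/rec42.mp4
```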
@@ -122,12 +122,10 @@ def get_replay(project_id, session_id, context: schemas.CurrentContext, full_dat
data = helper.dict_to_camel_case(data)
if full_data:
if data["platform"] == 'ios':
data['domURL'] = []
data['mobsUrl'] = []
data['videoURL'] = sessions_mobs.get_ios_videos(session_id=session_id, project_id=project_id,
check_existence=False)
else:
data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id,
check_existence=False)
data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id, check_existence=False)
data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id,
check_existence=False)

@@ -139,6 +137,8 @@ def get_replay(project_id, session_id, context: schemas.CurrentContext, full_dat
else:
data['utxVideo'] = []

data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id,
check_existence=False)
data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
project_key=data["projectKey"])
@@ -2,7 +2,7 @@ import logging

from fastapi import HTTPException, status

from chalicelib.core.db_request_handler import DatabaseRequestHandler
from chalicelib.utils.db_request_handler import DatabaseRequestHandler
from chalicelib.core.usability_testing.schema import UTTestCreate, UTTestSearch, UTTestUpdate
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.helper import dict_to_camel_case, list_to_camel_case
@@ -125,6 +125,10 @@ def update(tenant_id, user_id, changes, output=True):
if key == "password":
sub_query_bauth.append("password = crypt(%(password)s, gen_salt('bf', 12))")
sub_query_bauth.append("changed_at = timezone('utc'::text, now())")
sub_query_bauth.append("change_pwd_expire_at = NULL")
sub_query_bauth.append("change_pwd_token = NULL")
sub_query_bauth.append("invitation_token = NULL")
sub_query_bauth.append("invited_at = NULL")
else:
sub_query_bauth.append(f"{helper.key_to_snake_case(key)} = %({key})s")
else:

@@ -445,9 +449,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password):


def set_password_invitation(user_id, new_password):
changes = {"password": new_password,
"invitationToken": None, "invitedAt": None,
"changePwdExpireAt": None, "changePwdToken": None}
changes = {"password": new_password}
user = update(tenant_id=-1, user_id=user_id, changes=changes)
r = authenticate(user['email'], new_password)
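Because the password branch of `update()` now clears the invitation and reset columns directly in SQL, `set_password_invitation` only needs to pass the new password instead of also nulling `invitationToken`, `invitedAt`, `changePwdExpireAt` and `changePwdToken`. A simplified sketch of how those fragments compose; the real function builds many more clauses and executes the statement against the authentication table (the table name below is illustrative, taken only from the column names in the diff):

```python
# Rough sketch of the UPDATE fragments produced when key == "password"
# (illustrative only; the real update() assembles and runs the full statement):
def build_password_fragments():
    sub_query_bauth = [
        "password = crypt(%(password)s, gen_salt('bf', 12))",
        "changed_at = timezone('utc'::text, now())",
        "change_pwd_expire_at = NULL",
        "change_pwd_token = NULL",
        "invitation_token = NULL",
        "invited_at = NULL",
    ]
    # hypothetical table name used purely for illustration
    return "UPDATE basic_authentication SET " + ", ".join(sub_query_bauth)

print(build_password_fragments())
```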
@@ -10,7 +10,7 @@ def transform_email(email: str) -> str:


def int_to_string(value: int) -> str:
return str(value) if isinstance(value, int) else int
return str(value) if isinstance(value, int) else value


def remove_whitespace(value: str) -> str:
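The old branch returned the builtin `int` type object whenever the input was not an integer; the fix passes the original value through unchanged. A small sketch of the difference:

```python
def int_to_string_old(value):
    return str(value) if isinstance(value, int) else int    # returns the builtin type object

def int_to_string_fixed(value):
    return str(value) if isinstance(value, int) else value  # passes non-ints through

print(int_to_string_old("abc"))    # <class 'int'> -- clearly not intended
print(int_to_string_fixed("abc"))  # abc
print(int_to_string_fixed(42))     # 42 (as a string)
```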
ee/api/.gitignore (vendored)
@@ -274,3 +274,4 @@ Pipfile.lock
/orpy.py
/chalicelib/core/usability_testing/
/NOTES.md
/chalicelib/utils/db_request_handler.py
@@ -52,7 +52,7 @@ async def lifespan(app: FastAPI):
await events_queue.init()
app.schedule.start()

for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.ee_cron_jobs:
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.cron_jobs:
app.schedule.add_job(id=job["func"].__name__, **job)

ap_logger.info(">Scheduled jobs:")
@@ -1,11 +1,12 @@
import logging
from datetime import datetime

from fastapi import HTTPException

from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from schemas import AssistStatsSessionsRequest, AssistStatsSessionsResponse, AssistStatsTopMembersResponse

logger = logging.getLogger(__name__)
event_type_mapping = {
"sessionsAssisted": "assist",
"assistDuration": "assist",
@@ -17,12 +18,12 @@ event_type_mapping = {
def insert_aggregated_data():
try:
logging.info("Assist Stats: Inserting aggregated data")
end_timestamp = int(datetime.timestamp(datetime.now())) * 1000
end_timestamp = TimeUTC.now()
start_timestamp = __last_run_end_timestamp_from_aggregates()

if start_timestamp is None: # first run
logging.info("Assist Stats: First run, inserting data for last 7 days")
start_timestamp = end_timestamp - (7 * 24 * 60 * 60 * 1000)
start_timestamp = end_timestamp - TimeUTC.MS_WEEK

offset = 0
chunk_size = 1000
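The refactor above only names the quantities instead of inlining the arithmetic; a sketch of why the two forms are interchangeable, assuming `TimeUTC.now()` returns the current epoch time in milliseconds and `TimeUTC.MS_WEEK` is the number of milliseconds in seven days (both names come from the project's helper):

```python
from datetime import datetime

MS_WEEK = 7 * 24 * 60 * 60 * 1000   # 604_800_000 ms, stand-in for TimeUTC.MS_WEEK

end_timestamp = int(datetime.timestamp(datetime.now())) * 1000   # old inline form
start_timestamp = end_timestamp - MS_WEEK                        # new named form
assert start_timestamp == end_timestamp - (7 * 24 * 60 * 60 * 1000)
```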
@@ -103,9 +104,8 @@ def __last_run_end_timestamp_from_aggregates():
result = cur.fetchone()
last_run_time = result['last_run_time'] if result else None

if last_run_time is None: # first run handle all data
sql = "SELECT MIN(timestamp) as last_timestamp FROM assist_events;"
with pg_client.PostgresClient() as cur:
if last_run_time is None: # first run handle all data
sql = "SELECT MIN(timestamp) as last_timestamp FROM assist_events;"
cur.execute(sql)
result = cur.fetchone()
last_run_time = result['last_timestamp'] if result else None
@@ -1,10 +1,10 @@
import ast
import logging
from typing import List, Union

import schemas
from chalicelib.core import events, metadata, projects, performance_event, metrics
from chalicelib.core import events, metadata, projects, performance_event, metrics, sessions_legacy
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
import logging

logger = logging.getLogger(__name__)
SESSION_PROJECTION_COLS_CH = """\
@@ -353,6 +353,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
step_size = int(metrics_helper.__get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
density=density))
extra_event = None
extra_deduplication = []
if metric_of == schemas.MetricOfTable.visited_url:
extra_event = f"""SELECT DISTINCT ev.session_id, ev.url_path
FROM {exp_ch_helper.get_main_events_table(data.startTimestamp)} AS ev
@@ -360,12 +361,14 @@
AND ev.datetime <= toDateTime(%(endDate)s / 1000)
AND ev.project_id = %(project_id)s
AND ev.event_type = 'LOCATION'"""
extra_deduplication.append("url_path")
elif metric_of == schemas.MetricOfTable.issues and len(metric_value) > 0:
data.filters.append(schemas.SessionSearchFilterSchema(value=metric_value, type=schemas.FilterType.issue,
operator=schemas.SearchEventOperator._is))
full_args, query_part = search_query_parts_ch(data=data, error_status=None, errors_only=False,
favorite_only=False, issue=None, project_id=project_id,
user_id=None, extra_event=extra_event)
user_id=None, extra_event=extra_event,
extra_deduplication=extra_deduplication)
full_args["step_size"] = step_size
sessions = []
with ch_client.ClickHouseClient() as cur:
@@ -434,7 +437,6 @@ def search_table_of_individual_issues(data: schemas.SessionsSearchPayloadSchema,
full_args["issues_limit"] = data.limit
full_args["issues_limit_s"] = (data.page - 1) * data.limit
full_args["issues_limit_e"] = data.page * data.limit
print(full_args)
main_query = cur.format(f"""SELECT issues.type AS name,
issues.context_string AS value,
COUNT(DISTINCT raw_sessions.session_id) AS session_count,
@@ -519,7 +521,7 @@ def __get_event_type(event_type: Union[schemas.EventType, schemas.PerformanceEve

# this function generates the query and return the generated-query with the dict of query arguments
def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_status, errors_only, favorite_only, issue,
project_id, user_id, platform="web", extra_event=None):
project_id, user_id, platform="web", extra_event=None, extra_deduplication=[]):
ss_constraints = []
full_args = {"project_id": project_id, "startDate": data.startTimestamp, "endDate": data.endTimestamp,
"projectId": project_id, "userId": user_id}
@@ -1391,15 +1393,15 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
_value_conditions_not.append(_p)
value_conditions_not.append(p)
del _value_conditions_not
sequence_conditions += value_conditions_not
# sequence_conditions += value_conditions_not
events_extra_join += f"""LEFT ANTI JOIN ( SELECT DISTINCT session_id
FROM {MAIN_EVENTS_TABLE} AS main
WHERE {' AND '.join(__events_where_basic)}
AND ({' OR '.join(value_conditions_not)})) AS sub USING(session_id)"""

# if has_values:
# events_conditions = [c for c in list(set(sequence_conditions))]
# events_conditions_where.append(f"({' OR '.join(events_conditions)})")
if has_values and len(sequence_conditions) > 0:
events_conditions = [c for c in list(set(sequence_conditions))]
events_conditions_where.append(f"({' OR '.join(events_conditions)})")

events_query_part = f"""SELECT main.session_id,
MIN(main.datetime) AS first_event_ts,
@@ -1487,11 +1489,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
FROM {MAIN_SESSIONS_TABLE} AS s {extra_event}
WHERE {" AND ".join(extra_constraints)}) AS s ON(s.session_id=f.session_id)"""
else:
deduplication_keys = ["session_id"] + extra_deduplication
extra_join = f"""(SELECT *
FROM {MAIN_SESSIONS_TABLE} AS s {extra_join} {extra_event}
WHERE {" AND ".join(extra_constraints)}
ORDER BY _timestamp DESC
LIMIT 1 BY session_id) AS s"""
LIMIT 1 BY {",".join(deduplication_keys)}) AS s"""
query_part = f"""\
FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else ""}
{extra_join}
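In ClickHouse, `LIMIT 1 BY <cols>` keeps one row per distinct combination of the listed columns. With `extra_deduplication=["url_path"]` (set for the visited-URL table earlier in this file), the subquery now keeps one row per (session_id, url_path) pair instead of collapsing all URLs of a session into a single row. A small sketch of the clause the builder emits, with made-up inputs:

```python
# Rough illustration of the deduplication clause; the real query contains many
# more columns, filters and joins:
def dedup_clause(extra_deduplication=None):
    deduplication_keys = ["session_id"] + (extra_deduplication or [])
    return f"LIMIT 1 BY {','.join(deduplication_keys)}"

print(dedup_clause())              # LIMIT 1 BY session_id
print(dedup_clause(["url_path"]))  # LIMIT 1 BY session_id,url_path
```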
@@ -1665,3 +1668,29 @@ def check_recording_status(project_id: int) -> dict:
"recordingStatus": row["recording_status"],
"sessionsCount": row["sessions_count"]
}


# TODO: rewrite this function to use ClickHouse
def search_sessions_by_ids(project_id: int, session_ids: list, sort_by: str = 'session_id',
ascending: bool = False) -> dict:
if session_ids is None or len(session_ids) == 0:
return {"total": 0, "sessions": []}
with pg_client.PostgresClient() as cur:
meta_keys = metadata.get(project_id=project_id)
params = {"project_id": project_id, "session_ids": tuple(session_ids)}
order_direction = 'ASC' if ascending else 'DESC'
main_query = cur.mogrify(f"""SELECT {sessions_legacy.SESSION_PROJECTION_BASE_COLS}
{"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
FROM public.sessions AS s
WHERE project_id=%(project_id)s
AND session_id IN %(session_ids)s
ORDER BY {sort_by} {order_direction};""", params)

cur.execute(main_query)
rows = cur.fetchall()
if len(meta_keys) > 0:
for s in rows:
s["metadata"] = {}
for m in meta_keys:
s["metadata"][m["key"]] = s.pop(f'metadata_{m["index"]}')
return {"total": len(rows), "sessions": helper.list_to_camel_case(rows)}
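The newly added `search_sessions_by_ids` falls back to Postgres for now (hence the TODO about ClickHouse), short-circuits on an empty ID list, and folds per-session metadata columns into a `metadata` dict before camel-casing the rows. A hedged usage sketch with made-up IDs:

```python
# Hypothetical call (project and session IDs are made up); the return shape
# follows the function above:
result = search_sessions_by_ids(project_id=1, session_ids=[101, 102],
                                sort_by="session_id", ascending=True)
# result == {"total": 2, "sessions": [{"sessionId": ..., "metadata": {...}, ...}, ...]}
```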
@@ -134,8 +134,6 @@ def get_replay(project_id, session_id, context: schemas.CurrentContext, full_dat
data['videoURL'] = sessions_mobs.get_ios_videos(session_id=session_id, project_id=project_id,
check_existence=False)
else:
data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id,
check_existence=False)
data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id, check_existence=False)
data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id,
context=context, check_existence=False)

@@ -147,6 +145,8 @@ def get_replay(project_id, session_id, context: schemas.CurrentContext, full_dat
else:
data['utxVideo'] = []

data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id,
check_existence=False)
data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
project_key=data["projectKey"])
@@ -150,6 +150,10 @@ def update(tenant_id, user_id, changes, output=True):
if key == "password":
sub_query_bauth.append("password = crypt(%(password)s, gen_salt('bf', 12))")
sub_query_bauth.append("changed_at = timezone('utc'::text, now())")
sub_query_bauth.append("change_pwd_expire_at = NULL")
sub_query_bauth.append("change_pwd_token = NULL")
sub_query_bauth.append("invitation_token = NULL")
sub_query_bauth.append("invited_at = NULL")
else:
sub_query_bauth.append(f"{helper.key_to_snake_case(key)} = %({key})s")
else:

@@ -524,9 +528,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password):


def set_password_invitation(tenant_id, user_id, new_password):
changes = {"password": new_password,
"invitationToken": None, "invitedAt": None,
"changePwdExpireAt": None, "changePwdToken": None}
changes = {"password": new_password}
user = update(tenant_id=tenant_id, user_id=user_id, changes=changes)
r = authenticate(user['email'], new_password)
@@ -55,7 +55,7 @@ rm -rf ./chalicelib/core/user_testing.py
rm -rf ./chalicelib/saml
rm -rf ./chalicelib/utils/__init__.py
rm -rf ./chalicelib/utils/args_transformer.py
rm -rf ./chalicelib/utils/canvas.py
rm -rf ./chalicelib/core/canvas.py
rm -rf ./chalicelib/utils/captcha.py
rm -rf ./chalicelib/utils/dev.py
rm -rf ./chalicelib/utils/email_handler.py

@@ -93,4 +93,5 @@ rm -rf ./schemas/overrides.py
rm -rf ./schemas/schemas.py
rm -rf ./schemas/transformers_validators.py
rm -rf ./orpy.py
rm -rf ./chalicelib/core/usability_testing/
rm -rf ./chalicelib/core/usability_testing/
rm -rf ./chalicelib/utils/db_request_handler.py
@@ -3,6 +3,8 @@ from apscheduler.triggers.interval import IntervalTrigger
from chalicelib.utils import events_queue
from chalicelib.core import assist_stats

from decouple import config


async def pg_events_queue() -> None:
events_queue.global_queue.force_flush()

@@ -12,8 +14,14 @@ async def assist_events_aggregates_cron() -> None:
assist_stats.insert_aggregated_data()


ee_cron_jobs = [
{"func": pg_events_queue, "trigger": IntervalTrigger(minutes=5), "misfire_grace_time": 20, "max_instances": 1},
{"func": assist_events_aggregates_cron,
"trigger": IntervalTrigger(hours=1, start_date="2023-04-01 0:0:0", jitter=10), }
# SINGLE_CRONS are crons that will be run the crons-service, they are a singleton crons
SINGLE_CRONS = [{"func": assist_events_aggregates_cron,
"trigger": IntervalTrigger(hours=1, start_date="2023-04-01 0:0:0", jitter=10)}]

# cron_jobs is the list of crons to run in main API service (so you will have as many runs as the number of instances of the API)
cron_jobs = [
{"func": pg_events_queue, "trigger": IntervalTrigger(minutes=5), "misfire_grace_time": 20, "max_instances": 1}
]

if config("LOCAL_CRONS", default=False, cast=bool):
cron_jobs += SINGLE_CRONS
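Together with the `ee_crons.ee_cron_jobs` → `ee_crons.cron_jobs` change in the app lifespan hunk above, this splits per-instance crons from singleton crons: every API instance schedules `cron_jobs`, while the aggregation cron in `SINGLE_CRONS` is only added when the `LOCAL_CRONS` flag is set (e.g. a single-instance deployment without a separate crons service). A minimal sketch of the gating pattern, with stand-in job definitions:

```python
# Sketch of the LOCAL_CRONS gating (job dicts here are stand-ins, not the real definitions):
from decouple import config

SINGLE_CRONS = [{"func": "assist_events_aggregates_cron", "interval_hours": 1}]
cron_jobs = [{"func": "pg_events_queue", "interval_minutes": 5}]

if config("LOCAL_CRONS", default=False, cast=bool):
    # Only a deployment that runs its own crons locally also schedules the singleton jobs.
    cron_jobs += SINGLE_CRONS
```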
@@ -4,4 +4,7 @@ SQLAlchemy==1.4.43
google-cloud-bigquery==3.4.2
pandas==1.5.1
PyYAML==6.0
pandas-gbq==0.19.2
pandas-gbq==0.19.2
numpy>=1.22.2 # not directly required, pinned by Snyk to avoid a vulnerability
pyarrow>=14.0.1 # not directly required, pinned by Snyk to avoid a vulnerability
setuptools>=65.5.1 # not directly required, pinned by Snyk to avoid a vulnerability
@@ -22,5 +22,5 @@ MINIO_ACCESS_KEY = ''
MINIO_SECRET_KEY = ''

# APP and TRACKER VERSIONS
VERSION = 1.14.0
TRACKER_VERSION = '9.0.0'
VERSION = 1.16.3
TRACKER_VERSION = '11.0.1'
@@ -159,6 +159,8 @@ export default class APIClient {
} else {
return Promise.reject({ message: `! ${this.init.method} error on ${path}; ${response.status}`, response });
}
}).catch((error) => {
return Promise.reject({ message: `! ${this.init.method} error on ${path};` });
});
}
@@ -78,7 +78,6 @@ function MobileOverviewPanelCont({ issuesList }: { issuesList: Record<string, a

function WebOverviewPanelCont({ issuesList }: { issuesList: Record<string, any>[] }) {
const { store } = React.useContext(PlayerContext);
const [dataLoaded, setDataLoaded] = React.useState(false);
const [selectedFeatures, setSelectedFeatures] = React.useState([
'PERFORMANCE',
'FRUSTRATIONS',

@@ -93,7 +92,7 @@ function WebOverviewPanelCont({ issuesList }: { issuesList: Record<string, any>[
} = store.get();

const stackEventList = tabStates[currentTab]?.stackList || []
const eventsList = tabStates[currentTab]?.eventList || []
// const eventsList = tabStates[currentTab]?.eventList || []
const frustrationsList = tabStates[currentTab]?.frustrationsList || []
const exceptionsList = tabStates[currentTab]?.exceptionsList || []
const resourceListUnmap = tabStates[currentTab]?.resourceList || []

@@ -116,24 +115,7 @@ function WebOverviewPanelCont({ issuesList }: { issuesList: Record<string, any>[
PERFORMANCE: performanceChartData,
FRUSTRATIONS: frustrationsList,
};
}, [dataLoaded, currentTab]);

useEffect(() => {
if (dataLoaded) {
return;
}

if (
resourceList.length > 0 ||
exceptionsList.length > 0 ||
eventsList.length > 0 ||
stackEventList.length > 0 ||
issuesList.length > 0 ||
performanceChartData.length > 0
) {
setDataLoaded(true);
}
}, [resourceList, issuesList, exceptionsList, eventsList, stackEventList, performanceChartData, currentTab]);
}, [tabStates, currentTab]);

return <PanelComponent resources={resources} endTime={endTime} selectedFeatures={selectedFeatures} fetchPresented={fetchPresented} setSelectedFeatures={setSelectedFeatures} />
}
@@ -146,7 +146,9 @@ function FilterAutoComplete(props: Props) {
const loadOptions = (inputValue: string, callback: (options: []) => void) => {
// remove underscore from params
const _params = Object.keys(params).reduce((acc: any, key: string) => {
acc[key] = params[key].replace(/^_/, '');
if (key === 'type' && params[key] === 'metadata') {
acc[key] = params[key].replace(/^_/, '');
}
return acc;
}, {});
@@ -642,7 +642,8 @@ export const addElementToLiveFiltersMap = (
icon = 'filters/metadata'
) => {
liveFiltersMap[key] = {
key, type, category, label: capitalize(key),
key, type, category,
label: key.replace(/^_/, '').charAt(0).toUpperCase() + key.slice(2),
operator: operator,
operatorOptions,
icon,
@@ -5,9 +5,9 @@ original_env_file="$1"

# Check if the original env file exists and is not empty
if [ ! -s "$original_env_file" ]; then
echo "Error: The original env file is empty or does not exist."
echo "Usage: $0 /path/to/original.env"
exit 1
echo "Error: The original env file is empty or does not exist."
echo "Usage: $0 /path/to/original.env"
exit 1
fi

new_env_file="./common.env"
@@ -15,99 +15,111 @@ temp_env_file=$(mktemp)

# Function to merge environment variables from original to new env file
function merge_envs() {
while IFS='=' read -r key value; do
# Skip the line if the key is COMMON_VERSION
case "$key" in
COMMON_VERSION)
original_version=$(echo "$value" | xargs)
continue
;;
COMMON_PG_PASSWORD)
pgpassword=$value
;;
POSTGRES_VERSION | REDIS_VERSION | MINIO_VERSION)
# Don't update db versions automatically.
continue
;;
esac
while IFS='=' read -r key value; do
# Skip the line if the key is COMMON_VERSION
case "$key" in
COMMON_VERSION)
original_version=$(echo "$value" | xargs)
continue
;;
COMMON_PG_PASSWORD)
pgpassword=$(echo $value | xargs)
;;
POSTGRES_VERSION | REDIS_VERSION | MINIO_VERSION)
# Don't update db versions automatically.
continue
;;
esac

# Remove any existing entry from the new env file and add the new value
grep -v "^$key=" "$new_env_file" >"$temp_env_file"
mv "$temp_env_file" "$new_env_file"
echo "$key=$value" >>"$new_env_file"
done <"$original_env_file"
# Remove any existing entry from the new env file and add the new value
grep -v "^$key=" "$new_env_file" >"$temp_env_file"
mv "$temp_env_file" "$new_env_file"
echo "$key=$value" >>"$new_env_file"
done <"$original_env_file"
}

# Function to normalize version numbers for comparison
function normalise_version {
echo "$1" | awk -F. '{ printf("%03d%03d%03d\n", $1, $2, $3); }'
echo "$1" | awk -F. '{ printf("%03d%03d%03d\n", $1, $2, $3); }'
}

# Function to log messages
function log_message() {
echo "$@" >&2
echo "$@" >&2
}

# Function to create migration versions based on the current and previous application versions
function create_migration_versions() {
cd "${SCHEMA_DIR:-/opt/openreplay/openreplay/scripts/schema}" || {
log_message "not able to cd $SCHEMA_DIR"
exit 100
}
SCHEMA_DIR="../schema/"
cd $SCHEMA_DIR || {
log_message "not able to cd $SCHEMA_DIR"
exit 100
}

db=postgresql
# List all version directories excluding 'create' directory
all_versions=($(find db/init_dbs/$db -maxdepth 1 -type d -exec basename {} \; | grep -v create))
db=postgresql
# List all version directories excluding 'create' directory
all_versions=($(find db/init_dbs/$db -maxdepth 1 -type d -exec basename {} \; | grep -v create))

# Normalize the previous application version for comparison
PREVIOUS_APP_VERSION_NORMALIZED=$(normalise_version "${PREVIOUS_APP_VERSION}")
# Normalize the previous application version for comparison
PREVIOUS_APP_VERSION_NORMALIZED=$(normalise_version "${PREVIOUS_APP_VERSION}")

migration_versions=()
for ver in "${all_versions[@]}"; do
if [[ $(normalise_version "$ver") > "$PREVIOUS_APP_VERSION_NORMALIZED" ]]; then
migration_versions+=("$ver")
fi
done
migration_versions=()
for ver in "${all_versions[@]}"; do
if [[ $(normalise_version "$ver") > "$PREVIOUS_APP_VERSION_NORMALIZED" ]]; then
migration_versions+=("$ver")
fi
done

# Join migration versions into a single string separated by commas
joined_migration_versions=$(
IFS=,
echo "${migration_versions[*]}"
)
# Join migration versions into a single string separated by commas
joined_migration_versions=$(
IFS=,
echo "${migration_versions[*]}"
)

# Return to the previous directory
cd - >/dev/null || {
log_message "not able to cd back"
exit 100
}
# Return to the previous directory
cd - >/dev/null || {
log_message "not able to cd back"
exit 100
}

log_message "output: $joined_migration_versions"
echo "$joined_migration_versions"
log_message "output: $joined_migration_versions"
echo "$joined_migration_versions"
}

export SCHEMA_DIR="$(readlink -f ../schema/)"
echo $SCHEMA_DIR
# Function to perform migration
function migrate() {
# Set schema directory and previous application version
export SCHEMA_DIR="../schema/"
export PREVIOUS_APP_VERSION=${original_version#v}
# Set schema directory and previous application version
export PREVIOUS_APP_VERSION=${original_version#v}

# Create migration versions array
IFS=',' read -ra joined_migration_versions <<<"$(create_migration_versions)"
# Check if there are versions to migrate
[[ ${#joined_migration_versions[@]} -eq 0 ]] && {
echo "Nothing to migrate"
return
}
# Loop through versions and prepare Docker run commands
for ver in "${joined_migration_versions[@]}"; do
echo "$ver"
"docker run --rm --network openreplay-net \
--name pgmigrate -e 'PGHOST=postgres' -e 'PGPORT=5432' \
-e 'PGDATABASE=postgres' -e 'PGUSER=postgres' -e 'PGPASSWORD=$pgpassword' \
-v /opt/data/:$SCHEMA_DIR postgres psql -f /opt/data/schema/db/init_dbs/postgresql/$ver/$ver.sql"
done
# Create migration versions array
IFS=',' read -ra joined_migration_versions <<<"$(create_migration_versions)"
# Check if there are versions to migrate
[[ ${#joined_migration_versions[@]} -eq 0 ]] && {
echo "Nothing to migrate"
return
}
# Loop through versions and prepare Docker run commands
for ver in "${joined_migration_versions[@]}"; do
echo "$ver"
docker run --rm --network docker-compose_opereplay-net \
--name pgmigrate -e PGHOST=postgres -e PGPORT=5432 \
-e PGDATABASE=postgres -e PGUSER=postgres -e PGPASSWORD=$pgpassword \
-v $SCHEMA_DIR:/opt/data/ postgres psql -f /opt/data/db/init_dbs/postgresql/$ver/$ver.sql
done
}

# Merge environment variables and perform migration
merge_envs
migrate

# Load variables from common.env into the current shell's environment
set -a # automatically export all variables
source common.env
set +a

# Use the `envsubst` command to substitute the shell environment variables into reference_var.env and output to a combined .env
find ./ -type f \( -iname "*.env" -o -iname "docker-compose.yaml" \) ! -name "common.env" -exec /bin/bash -c 'file="{}";cp "$file" "$file.bak"; envsubst < "$file.bak" > "$file"; rm "$file.bak"' \;

sudo -E docker-compose up -d
@@ -13,21 +13,20 @@ OR_REPO="${OR_REPO:-'https://github.com/openreplay/openreplay'}"
# UPGRADE_OR_ONLY=1 openreplay -u

[[ -d $OR_DIR ]] || {
sudo mkdir $OR_DIR
sudo mkdir $OR_DIR
}
export PATH=/var/lib/openreplay:$PATH
function xargs() {
/var/lib/openreplay/busybox xargs
/var/lib/openreplay/busybox xargs
}

[[ $(awk '/enterpriseEditionLicense/{print $2}' < "/var/lib/openreplay/vars.yaml") != "" ]] && EE=true
[[ $(awk '/enterpriseEditionLicense/{print $2}' <"/var/lib/openreplay/vars.yaml") != "" ]] && EE=true

tools=(
zyedidia/eget
stern/stern
derailed/k9s
hidetatz/kubecolor
)
zyedidia/eget
stern/stern
hidetatz/kubecolor
)

# Ref: https://stackoverflow.com/questions/5947742/how-to-change-the-output-color-of-echo-in-linux
RED='\033[0;31m'
@@ -38,50 +37,50 @@ NC='\033[0m' # No Color

# Checking whether the app exists or we do have to upgade.
function exists() {
which "${1}" &> /dev/null
return $?
which "${1}" &>/dev/null
return $?
}

function err_cd() {
if ! cd "$1" &> /dev/null ; then
log err not able to cd to "$1"
exit 100
fi
if ! cd "$1" &>/dev/null; then
log err not able to cd to "$1"
exit 100
fi
}

function log () {
case "$1" in
function log() {
case "$1" in
info)
shift
echo -e "${GREEN}[INFO]" "$@" "${NC}"
return
;;
shift
echo -e "${GREEN}[INFO]" "$@" "${NC}"
return
;;
warn)
shift
echo -e "${YELLOW}[WARN]" "$@" "${NC}"
return
;;
shift
echo -e "${YELLOW}[WARN]" "$@" "${NC}"
return
;;
debug)
shift
echo -e "${YELLOW}[DEBUG]" "$@" "${NC}"
return
;;
shift
echo -e "${YELLOW}[DEBUG]" "$@" "${NC}"
return
;;
title)
shift
echo -e "\n${BWHITE}-" "$@" "${NC}"
return
;;
shift
echo -e "\n${BWHITE}-" "$@" "${NC}"
return
;;
err)
shift
echo -e "${RED}[ERROR]" "$@" "${NC}"
exit 100
;;
shift
echo -e "${RED}[ERROR]" "$@" "${NC}"
exit 100
;;
*)
echo "Not supported log format"
;;
esac
echo "[Error]" "$@"
exit 100
echo "Not supported log format"
;;
esac
echo "[Error]" "$@"
exit 100
}

# To run kubeconfig run
@@ -96,33 +95,35 @@ tmp_dir=$(mktemp -d)

function install_packages() {

[[ -e "$OR_DIR/eget" ]] || {
cd "$tmp_dir" || log err "Not able to cd to tmp dir $tmp_dir"
curl --version &> /dev/null || log err "curl not found. Please install"
curl -SsL https://zyedidia.github.io/eget.sh | sh - > /dev/null
sudo mv eget $OR_DIR
err_cd -
}
[[ -e "$OR_DIR/eget" ]] || {
cd "$tmp_dir" || log err "Not able to cd to tmp dir $tmp_dir"
curl --version &>/dev/null || log err "curl not found. Please install"
curl -SsL https://zyedidia.github.io/eget.sh | sh - >/dev/null
sudo mv eget $OR_DIR
err_cd -
}

for package in "${tools[@]}"; do
log info Installing "$(awk -F/ '{print $2}' <<< $package)"
sudo /var/lib/openreplay/eget -q --upgrade-only --to "${OR_DIR}" "$package"
done
log info Installing yq
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" mikefarah/yq --asset=^tar.gz
log info Installing helm
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" https://get.helm.sh/helm-v3.10.2-linux-amd64.tar.gz -f helm
log info Installing kubectl
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" https://dl.k8s.io/release/v1.25.0/bin/linux/amd64/kubectl
log info Installing Busybox
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" https://busybox.net/downloads/binaries/1.35.0-x86_64-linux-musl/busybox
date | sudo tee $OR_DIR/packages.lock &> /dev/null
for package in "${tools[@]}"; do
log info Installing "$(awk -F/ '{print $2}' <<<$package)"
sudo /var/lib/openreplay/eget -q --upgrade-only --to "${OR_DIR}" "$package"
done
log info Installing k9s
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" derailed/k9s --asset=tar.gz --asset=^sbom
log info Installing yq
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" mikefarah/yq --asset=^tar.gz
log info Installing helm
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" https://get.helm.sh/helm-v3.10.2-linux-amd64.tar.gz -f helm
log info Installing kubectl
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" https://dl.k8s.io/release/v1.25.0/bin/linux/amd64/kubectl
log info Installing Busybox
sudo /var/lib/openreplay/eget -q --upgrade-only --to "$OR_DIR" https://busybox.net/downloads/binaries/1.35.0-x86_64-linux-musl/busybox
date | sudo tee $OR_DIR/packages.lock &>/dev/null
}

function help() {

echo -e ${BWHITE}
cat <<"EOF"
echo -e ${BWHITE}
cat <<"EOF"
___ ____ _
/ _ \ _ __ ___ _ __ | _ \ ___ _ __ | | __ _ _ _
| | | | '_ \ / _ \ '_ \| |_) / _ \ '_ \| |/ _` | | | |
@@ -130,9 +131,9 @@ cat <<"EOF"
\___/| .__/ \___|_| |_|_| \_\___| .__/|_|\__,_|\__, |
|_| |_| |___/
EOF
echo -e ${NC}
echo -e ${NC}

log info "
log info "
Usage: openreplay [ -h | --help ]
[ -s | --status ]
[ -i | --install DOMAIN_NAME ]
@@ -149,335 +150,342 @@ log info "
http integrations nginx-controller
peers sink sourcemapreader storage
"
return
return
}

function status() {
log info OpenReplay Version
# awk '(NR<2)' < "$OR_DIR/vars.yaml"
awk '/fromVersion/{print $2}' < "${OR_DIR}/vars.yaml"
log info Disk
df -h /var
log info Memory
free -mh
log info CPU
uname -a
# Print only the fist line.
awk '(NR<2)' < /etc/os-release
echo "CPU Count: $(nproc)"
log info Kubernetes
kubecolor version --short
log info Openreplay Component
kubecolor get po -n "${APP_NS}"
kubecolor get po -n "${DB_NS}"
return
log info OpenReplay Version
# awk '(NR<2)' < "$OR_DIR/vars.yaml"
awk '/fromVersion/{print $2}' <"${OR_DIR}/vars.yaml"
log info Disk
df -h /var
log info Memory
free -mh
log info CPU
uname -a
# Print only the fist line.
awk '(NR<2)' </etc/os-release
echo "CPU Count: $(nproc)"
log info Kubernetes
kubecolor version --short
log info Openreplay Component
kubecolor get po -n "${APP_NS}"
kubecolor get po -n "${DB_NS}"
return
}

# Function to upgrade helm openreplay app.
function or_helm_upgrade() {
set -o pipefail
log_file="${tmp_dir}/helm.log"
state=$1
chart_names=(
toolings
openreplay
set -o pipefail
log_file="${tmp_dir}/helm.log"
state=$1
chart_names=(
toolings
openreplay
)
[[ $UPGRADE_OR_ONLY -eq 1 ]] && chart_names=( openreplay )
# Cleaning up toolings
[[ $CLEANUP_TOOLING -eq 1 ]] && {
helm uninstall toolings -n "$APP_NS"
}
if [[ $state == "reload" ]]; then
chart_names=( openreplay )
HELM_OPTIONS="${HELM_OPTIONS} --set skipMigration=true"
fi
for chart in "${chart_names[@]}"; do
[[ -z $OR_VERSION ]] || HELM_OPTIONS="${HELM_OPTIONS} --set dbMigrationUpstreamBranch=${OR_VERSION}"
log info helm upgrade --install "$chart" ./"$chart" -n "$APP_NS" --wait -f ./vars.yaml --atomic --debug $HELM_OPTIONS 2>&1 | tee -a "${log_file}"
if ! helm upgrade --install "$chart" ./"$chart" -n "$APP_NS" --wait -f ./vars.yaml --atomic --debug $HELM_OPTIONS 2>&1 | tee -a "${log_file}"; then
log err "
[[ $UPGRADE_OR_ONLY -eq 1 ]] && chart_names=(openreplay)
# Cleaning up toolings
[[ $CLEANUP_TOOLING -eq 1 ]] && {
helm uninstall toolings -n "$APP_NS"
}
if [[ $state == "reload" ]]; then
chart_names=(openreplay)
HELM_OPTIONS="${HELM_OPTIONS} --set skipMigration=true"
fi
for chart in "${chart_names[@]}"; do
[[ -z $OR_VERSION ]] || HELM_OPTIONS="${HELM_OPTIONS} --set dbMigrationUpstreamBranch=${OR_VERSION}"
log info helm upgrade --install "$chart" ./"$chart" -n "$APP_NS" --wait -f ./vars.yaml --atomic --debug $HELM_OPTIONS 2>&1 | tee -a "${log_file}"
if ! helm upgrade --install "$chart" ./"$chart" -n "$APP_NS" --wait -f ./vars.yaml --atomic --debug $HELM_OPTIONS 2>&1 | tee -a "${log_file}"; then
log err "
Installation failed, run ${BWHITE}cat ${log_file}${RED} for more info

If logs aren't verbose, run ${BWHITE}openreplay --status${RED}

If pods are in failed state, run ${BWHITE}openreplay --logs <pod-name>${RED}
"
fi
done
set +o pipefail
return
fi
done
set +o pipefail
return
}

function upgrade_old() {
old_vars_path="$1"
[[ -f $old_vars_path ]] || log err "No configuration file ${BWHITE}$old_vars_path${RED}.
old_vars_path="$1"
[[ -f $old_vars_path ]] || log err "No configuration file ${BWHITE}$old_vars_path${RED}.
If you're updating from version older than ${BWHITE}v1.10.0${RED}, for example ${BWHITE}v1.9.0${RED}:
${BWHITE}RELEASE_UPGRADE=1 openreplay --deprecated-upgrade ~/openreplay_v1.9.0/scripts/helmcharts/vars.yaml${RED}.
If you're having a custom installation,
${BWHITE}RELEASE_UPGRADE=1 openreplay --deprecated-upgrade /path/to/vars.yaml${RED}.
"
or_version=$(busybox awk '/fromVersion/{print $2}' < "${old_vars_path}")
sudo cp "${old_vars_path}" ${OR_DIR}/vars.yaml.backup."${or_version//\"}"_"$(date +%Y%m%d-%H%M%S)" || log err "Not able to copy old vars.yaml"
sudo cp "${old_vars_path}" ${OR_DIR}/vars.yaml || log err "Not able to copy old vars.yaml"
upgrade
or_version=$(busybox awk '/fromVersion/{print $2}' <"${old_vars_path}")
sudo cp "${old_vars_path}" ${OR_DIR}/vars.yaml.backup."${or_version//\"/}"_"$(date +%Y%m%d-%H%M%S)" || log err "Not able to copy old vars.yaml"
sudo cp "${old_vars_path}" ${OR_DIR}/vars.yaml || log err "Not able to copy old vars.yaml"
upgrade
}

function clone_repo() {
err_cd "$tmp_dir"
log info "Working directory $tmp_dir"
git_options="-b ${OR_VERSION:-main}"
log info "git clone ${OR_REPO} --depth 1 $git_options"
eval git clone "${OR_REPO}" --depth 1 $git_options
return
err_cd "$tmp_dir"
log info "Working directory $tmp_dir"
git_options="-b ${OR_VERSION:-main}"
log info "git clone ${OR_REPO} --depth 1 $git_options"
eval git clone "${OR_REPO}" --depth 1 $git_options
return
}

function install() {
domain_name=$1
# Check existing installation
[[ -f ${OR_DIR}/vars.yaml ]] && {
or_version=$(busybox awk '/fromVersion/{print $2}' < "${OR_DIR}/vars.yaml")
log err "Openreplay installation ${BWHITE}${or_version}${RED} found. If you want to upgrade, run ${BWHITE}openreplay -u${RED}"
}
# Installing OR
log title "Installing OpenReplay"
clone_repo
err_cd "$tmp_dir/openreplay/scripts/helmcharts"
DOMAIN_NAME=$domain_name bash init.sh
return
domain_name=$1
# Check existing installation
[[ -f ${OR_DIR}/vars.yaml ]] && {
or_version=$(busybox awk '/fromVersion/{print $2}' <"${OR_DIR}/vars.yaml")
log err "Openreplay installation ${BWHITE}${or_version}${RED} found. If you want to upgrade, run ${BWHITE}openreplay -u${RED}"
}
# Installing OR
log title "Installing OpenReplay"
clone_repo
err_cd "$tmp_dir/openreplay/scripts/helmcharts"
DOMAIN_NAME=$domain_name bash init.sh
return
}

function cleanup() {
# Confirmation for deletion. Do you want to delete Postgres/Minio(session) data before $date ?
delete_from_number_days=$1
delete_from_date=$(date +%Y-%m-%d -d "$delete_from_number_days day ago")
# Confirmation for deletion. Do you want to delete Postgres/Minio(session) data before $date ?
delete_from_number_days=$1
delete_from_date=$(date +%Y-%m-%d -d "$delete_from_number_days day ago")

# Check if --force flag is present
if [[ $2 == --force ]]; then
log info "Deleting data without confirmation..."
else
log debug "Do you want to delete the data captured on and before ${BWHITE}$delete_from_date${YELLOW}?"
read -p "Are you sure[y/n]? " -n 1 -r
echo # (optional) move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log err "Cancelling data deletion"
return 1 # Exit with an error code to indicate cancellation
# Check if --force flag is present
if [[ $2 == --force ]]; then
log info "Deleting data without confirmation..."
else
log debug "Do you want to delete the data captured on and before ${BWHITE}$delete_from_date${YELLOW}?"
read -p "Are you sure[y/n]? " -n 1 -r
echo # (optional) move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
log err "Cancelling data deletion"
return 1 # Exit with an error code to indicate cancellation
fi
fi
fi

# Run pg cleanup
pguser=$(awk '/postgresqlUser/{print $2}' < "${OR_DIR}/vars.yaml" | xargs)
pgpassword=$(awk '/postgresqlPassword/{print $2}' < "${OR_DIR}/vars.yaml" | xargs)
pghost=$(awk '/postgresqlHost/{print $2}' < "${OR_DIR}/vars.yaml" | xargs)
pgport=$(awk '/postgresqlPort/{print $2}' < "${OR_DIR}/vars.yaml" | xargs)
pgdatabase=$(awk '/postgresqlDatabase/{print $2}' < "${OR_DIR}/vars.yaml" | xargs)
cleanup_query="DELETE FROM public.sessions WHERE start_ts < extract(epoch from '${delete_from_date}'::date) * 1000;"
[[ $EE ]] && cleanup_query="DELETE FROM public.sessions WHERE start_ts < extract(epoch from '${delete_from_date}'::date) * 1000 AND session_id NOT IN (SELECT session_id FROM user_favorite_sessions);"
kubectl delete po -n "${APP_NS}" pg-cleanup &> /dev/null || true
kubectl run pg-cleanup -n "${APP_NS}" \
--restart=Never \
--env PGHOST="$pghost"\
--env PGUSER="$pguser"\
--env PGDATABASE="$pgdatabase"\
--env PGPASSWORD="$pgpassword"\
--env PGPORT="$pgport"\
--image bitnami/postgresql -- psql -c "$cleanup_query"
# Run minio cleanup
MINIO_ACCESS_KEY=$(awk '/accessKey/{print $NF}' < "${OR_DIR}/vars.yaml" | tail -n1 | xargs)
MINIO_SECRET_KEY=$(awk '/secretKey/{print $NF}' < "${OR_DIR}/vars.yaml" | tail -n1 | xargs)
MINIO_HOST=$(awk '/endpoint/{print $NF}' < "${OR_DIR}/vars.yaml" | tail -n1 | xargs)
kubectl delete po -n "${APP_NS}" minio-cleanup &> /dev/null || true
kubectl run minio-cleanup -n "${APP_NS}" \
--restart=Never \
--env MINIO_HOST="$pghost" \
--image bitnami/minio:2020.10.9-debian-10-r6 -- /bin/sh -c "
# Run pg cleanup
pguser=$(awk '/postgresqlUser/{print $2}' <"${OR_DIR}/vars.yaml" | xargs)
pgpassword=$(awk '/postgresqlPassword/{print $2}' <"${OR_DIR}/vars.yaml" | xargs)
pghost=$(awk '/postgresqlHost/{print $2}' <"${OR_DIR}/vars.yaml" | xargs)
pgport=$(awk '/postgresqlPort/{print $2}' <"${OR_DIR}/vars.yaml" | xargs)
pgdatabase=$(awk '/postgresqlDatabase/{print $2}' <"${OR_DIR}/vars.yaml" | xargs)
cleanup_query="DELETE FROM public.sessions WHERE start_ts < extract(epoch from '${delete_from_date}'::date) * 1000;"
[[ $EE ]] && cleanup_query="DELETE FROM public.sessions WHERE start_ts < extract(epoch from '${delete_from_date}'::date) * 1000 AND session_id NOT IN (SELECT session_id FROM user_favorite_sessions);"
kubectl delete po -n "${APP_NS}" pg-cleanup &>/dev/null || true
kubectl run pg-cleanup -n "${APP_NS}" \
--restart=Never \
--env PGHOST="$pghost" \
--env PGUSER="$pguser" \
--env PGDATABASE="$pgdatabase" \
--env PGPASSWORD="$pgpassword" \
--env PGPORT="$pgport" \
--image bitnami/postgresql -- psql -c "$cleanup_query"
# Run minio cleanup
MINIO_ACCESS_KEY=$(awk '/accessKey/{print $NF}' <"${OR_DIR}/vars.yaml" | tail -n1 | xargs)
MINIO_SECRET_KEY=$(awk '/secretKey/{print $NF}' <"${OR_DIR}/vars.yaml" | tail -n1 | xargs)
MINIO_HOST=$(awk '/endpoint/{print $NF}' <"${OR_DIR}/vars.yaml" | tail -n1 | xargs)
kubectl delete po -n "${APP_NS}" minio-cleanup &>/dev/null || true
kubectl run minio-cleanup -n "${APP_NS}" \
--restart=Never \
--env MINIO_HOST="$pghost" \
--image bitnami/minio:2020.10.9-debian-10-r6 -- /bin/sh -c "
mc alias set minio $MINIO_HOST $MINIO_ACCESS_KEY $MINIO_SECRET_KEY &&
mc rm --recursive --dangerous --force --older-than ${delete_from_number_days}d minio/mobs
"
log info "Postgres data cleanup process initiated. Postgres will automatically vacuum deleted rows when the database is idle. This may take up a few days to free the disk space."
log info "Minio (where recordings are stored) cleanup process initiated."
log info "Run ${BWHITE}openreplay -s${GREEN} to check the status of the cleanup process and available disk space."
return
log info "Postgres data cleanup process initiated. Postgres will automatically vacuum deleted rows when the database is idle. This may take up a few days to free the disk space."
log info "Minio (where recordings are stored) cleanup process initiated."
log info "Run ${BWHITE}openreplay -s${GREEN} to check the status of the cleanup process and available disk space."
return
}

function upgrade() {
# TODO:
# 1. store vars.yaml in central place.
# 3. In upgrade you'll have to clone the repo
# 3. How to update package. Because openreplay -u will be done from old update script
# 4. Update from Version
exists git || log err "Git not found. Please install"
[[ -f ${OR_DIR}/vars.yaml ]] || log err "No configuration file ${BWHITE}${OR_DIR}/vars.yaml${RED}.
# TODO:
# 1. store vars.yaml in central place.
# 3. In upgrade you'll have to clone the repo
# 3. How to update package. Because openreplay -u will be done from old update script
# 4. Update from Version
exists git || log err "Git not found. Please install"
[[ -f ${OR_DIR}/vars.yaml ]] || log err "No configuration file ${BWHITE}${OR_DIR}/vars.yaml${RED}.
If you're updating from version older than ${BWHITE}v1.10.0${RED}, for example ${BWHITE}v1.9.0${RED}:
${BWHITE}RELEASE_UPGRADE=1 openreplay --deprecated-upgrade ~/openreplay_v1.9.0/scripts/helmcharts/vars.yaml${RED}.
If you're having a custom installation,
${BWHITE}RELEASE_UPGRADE=1 openreplay --deprecated-upgrade /path/to/vars.yaml${RED}.
"
or_version=$(busybox awk '/fromVersion/{print $2}' < "${OR_DIR}/vars.yaml") || {
log err "${BWHITE}${OR_DIR}/vars.yaml${RED} not found.
or_version=$(busybox awk '/fromVersion/{print $2}' <"${OR_DIR}/vars.yaml") || {
log err "${BWHITE}${OR_DIR}/vars.yaml${RED} not found.
Please do ${BWHITE}openreplay --deprecated-upgrade /path/to/vars.yaml${RED}
"
}
}

# Unless its upgrade release, always checkout same tag.
[[ $RELEASE_UPGRADE -eq 1 ]] || OR_VERSION=${OR_VERSION:-$or_version}
# Unless its upgrade release, always checkout same tag.
[[ $RELEASE_UPGRADE -eq 1 ]] || OR_VERSION=${OR_VERSION:-$or_version}

time_now=$(date +%m-%d-%Y-%I%M%S)
# Creating backup dir of current installation
[[ -d "$OR_DIR/openreplay" ]] && sudo mv "$OR_DIR/openreplay" "$OR_DIR/openreplay_${or_version//\"}_${time_now}"
time_now=$(date +%m-%d-%Y-%I%M%S)
# Creating backup dir of current installation
[[ -d "$OR_DIR/openreplay" ]] && sudo mv "$OR_DIR/openreplay" "$OR_DIR/openreplay_${or_version//\"/}_${time_now}"

clone_repo
err_cd openreplay/scripts/helmcharts
install_packages
[[ -d /openreplay ]] && sudo chown -R 1001:1001 /openreplay
clone_repo
err_cd openreplay/scripts/helmcharts
install_packages
[[ -d /openreplay ]] && sudo chown -R 1001:1001 /openreplay

# Merge prefrerences
cp $OR_DIR/vars.yaml old_vars.yaml
or_new_version=$(awk '/fromVersion/{print $2}' < "vars.yaml")
yq '(load("old_vars.yaml") | .. | select(tag != "!!map" and tag != "!!seq")) as $i ireduce(.; setpath($i | path; $i))' vars.yaml > new_vars.yaml
mv new_vars.yaml vars.yaml
or_helm_upgrade
# Merge prefrerences
cp $OR_DIR/vars.yaml old_vars.yaml
or_new_version=$(awk '/fromVersion/{print $2}' <"vars.yaml")
yq '(load("old_vars.yaml") | .. | select(tag != "!!map" and tag != "!!seq")) as $i ireduce(.; setpath($i | path; $i))' vars.yaml >new_vars.yaml
mv new_vars.yaml vars.yaml
or_helm_upgrade

# Update the version
busybox sed -i "s/fromVersion.*/fromVersion: ${or_new_version}/" vars.yaml
sudo mv ./openreplay-cli /bin/
sudo mv ./vars.yaml "$OR_DIR"
sudo cp -rf ../../../openreplay $OR_DIR/
log info "Configuration file is saved in /var/lib/openreplay/vars.yaml"
log info "Run ${BWHITE}openreplay -h${GREEN} to see the cli information to manage OpenReplay."
# Update the version
busybox sed -i "s/fromVersion.*/fromVersion: ${or_new_version}/" vars.yaml
sudo mv ./openreplay-cli /bin/
sudo mv ./vars.yaml "$OR_DIR"
sudo cp -rf ../../../openreplay $OR_DIR/
log info "Configuration file is saved in /var/lib/openreplay/vars.yaml"
log info "Run ${BWHITE}openreplay -h${GREEN} to see the cli information to manage OpenReplay."

err_cd -
return
err_cd -
return
}

function reload() {
err_cd $OR_DIR/openreplay/scripts/helmcharts
sudo cp -f $OR_DIR/vars.yaml .
or_helm_upgrade reload
return
err_cd $OR_DIR/openreplay/scripts/helmcharts
sudo cp -f $OR_DIR/vars.yaml .
or_helm_upgrade reload
return
}

function clean_tmp_dir() {
[[ -z $SKIP_DELETE_TMP_DIR ]] && rm -rf "${tmp_dir}"
[[ -z $SKIP_DELETE_TMP_DIR ]] && rm -rf "${tmp_dir}"
}

[[ -f $OR_DIR/packages.lock ]] || {
log title Installing packages "${NC}"
install_packages
log title Installing packages "${NC}"
install_packages
}

PARSED_ARGUMENTS=$(busybox getopt -a -n openreplay -o Rrevpi:uhsl:U:c: --long reload,edit,restart,verbose,install-packages,install:,upgrade,help,status,logs,deprecated-upgrade:,cleanup:,force -- "$@")
VALID_ARGUMENTS=$?
if [[ "$VALID_ARGUMENTS" != "0" ]]; then
help
exit 100
help
exit 100
fi

eval set -- "$PARSED_ARGUMENTS"
while :
do
case "$1" in
-v | --verbose) VERBOSE=1; echo $VERBOSE; clean_tmp_dir ; shift ;;
while :; do
case "$1" in
-v | --verbose)
VERBOSE=1
echo $VERBOSE
clean_tmp_dir
shift
;;
-h | --help)
help
clean_tmp_dir
exit 0
;;
help
clean_tmp_dir
exit 0
;;
-i | --install)
log title "Installing OpenReplay"
install "$2"
clean_tmp_dir
exit 0
;;
log title "Installing OpenReplay"
install "$2"
clean_tmp_dir
exit 0
;;
-p | --install-packages)
log title "Updating/Installing dependency packages"
install_packages
clean_tmp_dir
exit 0
;;
log title "Updating/Installing dependency packages"
install_packages
clean_tmp_dir
exit 0
;;
-u | --upgrade)
if [[ $RELEASE_UPGRADE -eq 1 ]]; then
log title "Upgrading OpenReplay to Latest Release"
CLEANUP_TOOLING=1
else
log title "Applying Latest OpenReplay Patches"
UPGRADE_OR_ONLY=${UPGRADE_OR_ONLY:-1}
fi
upgrade
clean_tmp_dir
exit 0
;;
if [[ $RELEASE_UPGRADE -eq 1 ]]; then
log title "Upgrading OpenReplay to Latest Release"
CLEANUP_TOOLING=1
else
log title "Applying Latest OpenReplay Patches"
UPGRADE_OR_ONLY=${UPGRADE_OR_ONLY:-1}
fi
upgrade
clean_tmp_dir
exit 0
;;
-U | --deprecated-upgrade)
log title "[Deprected] Upgrading OpenReplay"
upgrade_old "$2"
clean_tmp_dir
exit 0
;;
log title "[Deprected] Upgrading OpenReplay"
upgrade_old "$2"
clean_tmp_dir
exit 0
;;
-c | --cleanup)
log title "Cleaning up data older than $2 days"
cleanup "$2" "$3"
clean_tmp_dir
exit 0
;;
log title "Cleaning up data older than $2 days"
cleanup "$2" "$3"
clean_tmp_dir
exit 0
;;
-r | --restart)
log title "Restarting OpenReplay Components"
kubecolor rollout restart deployment -n "${APP_NS}"
kubecolor rollout status deployment -n "${APP_NS}"
clean_tmp_dir
exit 0
;;
log title "Restarting OpenReplay Components"
kubecolor rollout restart deployment -n "${APP_NS}"
kubecolor rollout status deployment -n "${APP_NS}"
clean_tmp_dir
exit 0
;;
-R | --reload)
log title "Reloading OpenReplay Components"
reload
clean_tmp_dir
exit 0
;;
log title "Reloading OpenReplay Components"
reload
clean_tmp_dir
exit 0
;;
-e | --edit)
log title "Editing OpenReplay"
[[ -f ${OR_DIR}/vars.yaml ]] || {
log err "
log title "Editing OpenReplay"
[[ -f ${OR_DIR}/vars.yaml ]] || {
log err "
Couldn't open ${BWHITE}${OR_DIR}/vars.yaml${RED}. Seems like a custom installation.
Edit the proper ${BWHITE}vars.yaml${RED} and run ${BWHITE}openreplay -R${RED}
Or ${BWHITE}helm upgrade openreplay -n app openreplay/scripts/helmcharts/openreplay -f openreplay/scripts/helmcharts/vars.yaml --debug --atomic"
exit 100
}
/var/lib/openreplay/busybox md5sum /var/lib/openreplay/vars.yaml > "${tmp_dir}/var.yaml.md5"
sudo vim -n ${OR_DIR}/vars.yaml
/var/lib/openreplay/yq 'true' /var/lib/openreplay/vars.yaml &> /dev/null || {
log debug "seems like the edit is not correct. Rerun ${BWHITE}openreplay -e${YELLOW} and fix the issue in config file."
exit 100
}
/var/lib/openreplay/busybox md5sum /var/lib/openreplay/vars.yaml >"${tmp_dir}/var.yaml.md5"
sudo vim -n ${OR_DIR}/vars.yaml
/var/lib/openreplay/yq 'true' /var/lib/openreplay/vars.yaml &>/dev/null || {
log debug "seems like the edit is not correct. Rerun ${BWHITE}openreplay -e${YELLOW} and fix the issue in config file."
clean_tmp_dir
exit 100
}
if /var/lib/openreplay/busybox md5sum -c "${tmp_dir}/var.yaml.md5" &>/dev/null; then
log info "No change detected in ${BWHITE}${OR_DIR}/vars.yaml${GREEN}. Not reloading"
else
reload
fi
clean_tmp_dir
exit 100
}
if /var/lib/openreplay/busybox md5sum -c "${tmp_dir}/var.yaml.md5" &> /dev/null; then
log info "No change detected in ${BWHITE}${OR_DIR}/vars.yaml${GREEN}. Not reloading"
else
reload
fi
clean_tmp_dir
exit 0
;;
exit 0
;;
-s | --status)
log title "Checking OpenReplay Components Status"
status
clean_tmp_dir
exit 0
;;
log title "Checking OpenReplay Components Status"
status
clean_tmp_dir
exit 0
;;
-l | --logs)
# Skipping double quotes because we want globbing. For example
# ./openreplay -l "chalice --tail 10"
stern -A --container-state=running,terminated $2
clean_tmp_dir
exit 0
;;
# Skipping double quotes because we want globbing. For example
# ./openreplay -l "chalice --tail 10"
stern -A --container-state=running,terminated $2
clean_tmp_dir
exit 0
;;
# -- means the end of the arguments; drop this, and break out of the while loop
--) shift; break ;;
--)
shift
break
;;
# If invalid options were passed, then getopt should have reported an error,
# which we checked as VALID_ARGUMENTS when getopt was called...
*)
echo "Unexpected option: $1 - this should not happen."
help
clean_tmp_dir
;;
esac
echo "Unexpected option: $1 - this should not happen."
help
clean_tmp_dir
;;
esac
done

[ $# -eq 0 ] && help
@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.1
version: 0.1.3

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
AppVersion: "v1.16.0"
AppVersion: "v1.16.1"
@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.7
version: 0.1.18

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
AppVersion: "v1.16.0"
AppVersion: "v1.16.11"
@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (frontends://semver.org/)
version: 0.1.10
version: 0.1.13

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
AppVersion: "v1.16.0"
AppVersion: "v1.16.3"