* ci(deployment): injecting secrets Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * fix: typo * feat(installation): Enterprise license check * fix(install): reset ee cli args Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * Fix typo * Update README.md * feat (tracker-axios): init plugin * fix (tracker-axios): version patch * Fixed alert's unknown metrics handler * fix (tracker-mobx): dev-dependencies and updated package-lock * feat: APIs for user session data deleteion - wip * fix: alert metric value of performance.speed_index * Build and deploy scripts for enterprise edition (#13) * feat(installation): enterprise installation * chore(install): enabling ansible gather_facts Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(install): quotes for enterprise key Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(installation): enterprise install dbs Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(install): rename yaml * chore(install): change image tag Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(install): License key variable added * chore(deployment): Injecting enterprise license key in workers. * chore(install): remove deprecated files * chore(install): make domain_name mandatory in vars.yaml Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(actions): ee workers Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * feat(install): use local docker instead of crictl You can use the images built in the local machine, in installation, without putting that in any external registry. 
Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * feat: APIs for user session data deleteion * feat: prefix deleted mobs with DEL_ * feat: schedules to delete mobs * chore(ci): fix ee build Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * feat(build): passing build args to internal scripts Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(install): moving kafka topic creation at the end Kafka pods usually takes time to be active. Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(install): removing auth service. * chore(install): Adding rancher for cluster management * chore(install): proper name for alerts template * separate requirements and clean up * feat (frontend): typescript support * feat (tracker): 3.0.4: maintain baseURL & connAttempt options * feat(api): changed license validation * feat(api): ee-license fix for unprovided value * feat(api): fixed ee-signup cursor * feat(api): FOS fix replay-mob issue * feat(api): ee log ch-resources query * chore(ci): change openreplay-cli with kube-install.sh Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * ci(actions): change ee naming * feat(api): removed ch-logs * feat(install): injecting ee variables only on ee installation. Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * chore(install): remove licence key from ee Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * fix(install): ch values for chalice * feat(clickhouse): moved creation scripts to EE folder * fix (backend-ee): disable ios tables so far * chore(install): remove deprecated mandatory variables. 
Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com> * feat(api): remove duplicate files & changed signup * fix(backend-ee): ch prepare after commit * fix(backend-ee): syntax * feat(api): added missing EE tenant column * fix(scripts-ee): correct default clickhouse host * feat(api): changed version_number location * feat(api): ee log ch-errors query * feat(api): ee fix ch-errors query * feat: skip to issue button (#23) * feat(api): 🐛 ee fix ambiguous ch-error query & accounts endpoint * Feature: Autoplay Sessions (#22) * feat: autoplay sessions * change: removed unused import * auto play filter by tab * feat(api): changed JWT authorizer & API_KEY authorizer & fix undefined project_key * feat (backend-devops): Dockerfile for all services in one image * feat(sourcemap-uploader): --verbose argument use instead of --log * feat(api): log middleware * Feature - dom inspector (#28) * feat (frontend): typescript support * feat(frontend): DOM Inspector init * fix(frontend): use tailwind bg * feat(frontend dom-inspector): add element selection & deletion * fix(frontend): todo comment * di - styling wip * feature(di) - editor theme * feat(frontend): parse attributes with RE (+ability to add) * feature(di) - input width * fix(ui): di - review changes Co-authored-by: ShiKhu <alex.kaminsky.11@gmail.com> * chore(install): remove depricated init_dbs * feat(api): ee override multi-tenant-core * fix(frontend-build): gen css types before build * fix(ui) - checking for the license (#30) Co-authored-by: Rajesh Rajendran <rjshrjndrn@gmail.com> Co-authored-by: Mehdi Osman <estradino@users.noreply.github.com> Co-authored-by: ShiKhu <alex.kaminsky.11@gmail.com> Co-authored-by: KRAIEM Taha Yassine <tahayk2@gmail.com> Co-authored-by: Rajesh Rajendran <rjshrjndrn@users.noreply.github.com> Co-authored-by: ourvakan <hi-psi@yandex.com> Co-authored-by: tahayk2@gmail.com <enissay4ever4github>
173 lines
4.6 KiB
Python
173 lines
4.6 KiB
Python
from chalicelib.utils import pg_client, helper
|
|
from chalicelib.utils.TimeUTC import TimeUTC
|
|
from chalicelib.core import sessions, sessions_mobs
|
|
|
|
|
|
class Actions:
    """Identifiers for the job actions this module knows how to execute."""

    # Deletes a user's sessions and their replay (mob) files; see execute_jobs().
    DELETE_USER_DATA = "delete_user_data"
|
|
|
|
|
|
class JobStatus:
    """Lifecycle states a job row can be in."""

    SCHEDULED = "scheduled"  # waiting for its start_at time to pass
    COMPLETED = "completed"  # executed successfully
    FAILED = "failed"        # raised an exception during execution
    CANCELLED = "cancelled"  # withdrawn before execution
|
|
|
|
|
|
def get(job_id):
    """Fetch a single job row by its primary key.

    :param job_id: the jobs.job_id value to look up
    :return: the job as a camelCase dict with datetime columns converted to
             timestamps, or an empty dict when no such job exists
    """
    with pg_client.PostgresClient() as cur:
        cur.execute(
            query=cur.mogrify(
                """\
                SELECT
                    *
                FROM public.jobs
                WHERE job_id = %(job_id)s;""",
                {"job_id": job_id},
            )
        )
        row = cur.fetchone()

    if row is None:
        return {}
    format_datetime(row)
    return helper.dict_to_camel_case(row)
|
|
|
|
|
|
def get_all(project_id):
    """Fetch every job belonging to a project.

    :param project_id: the jobs.project_id value to filter on
    :return: list of camelCase job dicts with datetime columns converted
             to timestamps (empty list when the project has no jobs)
    """
    with pg_client.PostgresClient() as cur:
        cur.execute(
            query=cur.mogrify(
                """\
                SELECT
                    *
                FROM public.jobs
                WHERE project_id = %(project_id)s;""",
                {"project_id": project_id},
            )
        )
        rows = cur.fetchall()

    for row in rows:
        format_datetime(row)
    return helper.list_to_camel_case(rows)
|
|
|
|
|
|
def create(project_id, data):
    """Insert a new job row and return the stored record.

    :param project_id: project the job belongs to
    :param data: dict supplying description, action, reference_id and
                 start_at (and optionally overriding status/project_id)
    :return: the inserted row as a camelCase dict with datetime columns
             converted to timestamps
    """
    with pg_client.PostgresClient() as cur:
        job = {
            # Use the JobStatus constant instead of a bare "scheduled"
            # literal so the initial state stays consistent with
            # get_scheduled_jobs() and cancel_job().
            "status": JobStatus.SCHEDULED,
            "project_id": project_id,
            **data
        }

        query = cur.mogrify("""\
            INSERT INTO public.jobs(
                project_id, description, status, action,
                reference_id, start_at
            )
            VALUES (
                %(project_id)s, %(description)s, %(status)s, %(action)s,
                %(reference_id)s, %(start_at)s
            ) RETURNING *;""", job)

        cur.execute(query=query)

        r = cur.fetchone()
        format_datetime(r)
        return helper.dict_to_camel_case(r)
|
|
|
|
|
|
def cancel_job(job_id, job):
    """Flag a job as cancelled and persist the new status.

    Mutates the passed-in job dict, so the caller observes the
    CANCELLED status afterwards.
    """
    job.update(status=JobStatus.CANCELLED)
    update(job_id=job_id, job=job)
|
|
|
|
|
|
def update(job_id, job):
    """Persist a job's current status (and errors, if any) and return the stored row.

    :param job_id: primary key of the row to update
    :param job: dict providing at least "status"; "errors" is optional and
                defaults to NULL
    :return: the updated row as a camelCase dict with datetime columns
             converted to timestamps
    """
    with pg_client.PostgresClient() as cur:
        # Defaults first, then job on top — keys present in `job`
        # (including a possible "job_id"/"errors") win, matching the
        # original {**defaults, **job} merge order.
        params = {"job_id": job_id, "errors": job.get("errors")}
        params.update(job)

        query = cur.mogrify("""\
            UPDATE public.jobs
            SET
                updated_at = timezone('utc'::text, now()),
                status = %(status)s,
                errors = %(errors)s
            WHERE
                job_id = %(job_id)s RETURNING *;""", params)

        cur.execute(query=query)

        row = cur.fetchone()
        format_datetime(row)
        return helper.dict_to_camel_case(row)
|
|
|
|
|
|
def format_datetime(r):
    """Convert a job row's datetime columns to timestamps, in place.

    Rewrites created_at, updated_at and start_at via
    TimeUTC.datetime_to_timestamp. Tolerates a None row (e.g. a fetch
    that found nothing) instead of raising TypeError, and returns the
    row for call-chaining convenience — callers that ignore the return
    value are unaffected.

    :param r: mutable job row dict, or None
    :return: the same dict (mutated), or None if r was None
    """
    if r is None:
        return None
    for key in ("created_at", "updated_at", "start_at"):
        r[key] = TimeUTC.datetime_to_timestamp(r[key])
    return r
|
|
|
|
|
|
def get_scheduled_jobs():
    """Fetch all jobs still in 'scheduled' state whose start time has passed.

    :return: list of camelCase job dicts with datetime columns converted
             to timestamps (empty list when nothing is due)
    """
    with pg_client.PostgresClient() as cur:
        cur.execute(
            query=cur.mogrify(
                """\
                SELECT * FROM public.jobs
                WHERE status = %(status)s AND start_at <= (now() at time zone 'utc');""",
                {"status": JobStatus.SCHEDULED},
            )
        )
        rows = cur.fetchall()

    for row in rows:
        format_datetime(row)

    return helper.list_to_camel_case(rows)
|
|
|
|
|
|
def execute_jobs():
    """Run every scheduled job whose start time has passed.

    Fetches due jobs, executes the action each one describes, then writes
    the resulting status (COMPLETED or FAILED, with error details) back to
    the database. Unknown actions are recorded as failures rather than
    aborting the whole batch.
    """
    jobs = get_scheduled_jobs()
    if not jobs:
        print('No jobs to execute.')
        return

    for job in jobs:
        # Records come from get_scheduled_jobs() already camelCased, so the
        # primary key is "jobId" — the previous code read the nonexistent
        # "id"/"job_id" keys, which raised KeyError before any job ran.
        print(f"job can be executed {job['jobId']}")
        try:
            if job["action"] == Actions.DELETE_USER_DATA:
                session_ids = sessions.get_session_ids_by_user_ids(
                    project_id=job["projectId"], user_ids=job["referenceId"]
                )

                sessions.delete_sessions_by_session_ids(session_ids)
                sessions_mobs.delete_mobs(session_ids)
            else:
                raise Exception(f"The action {job['action']} not supported.")

            job["status"] = JobStatus.COMPLETED
            print(f"job completed {job['jobId']}")
        except Exception as e:
            job["status"] = JobStatus.FAILED
            # update() reads and persists the "errors" key; the previous
            # code set "error", so failure details were silently dropped.
            job["errors"] = str(e)
            print(f"job failed {job['jobId']}")

        update(job["jobId"], job)
|
|
|
|
|
|
def group_user_ids_by_project_id(jobs, now):
    """Group the jobs that are already due (startAt <= now) by project id.

    NOTE: despite the name, the dict values are the job dicts themselves,
    not bare user ids.

    :param jobs: iterable of camelCase job dicts (must have "startAt" and
                 "projectId" keys)
    :param now: timestamp cutoff; jobs starting after this are skipped
    :return: dict mapping project_id -> list of due jobs (insertion order
             preserved)
    """
    due_by_project = {}
    for job in jobs:
        if job["startAt"] <= now:
            due_by_project.setdefault(job["projectId"], []).append(job)
    return due_by_project
|