feat(chalice): upgraded startup/shutdown logic

feat(alerts): health-check endpoint
Taha Yassine Kraiem 2023-03-14 16:27:40 +01:00
parent 8e5ae800d5
commit 8b6ebbe815
11 changed files with 83 additions and 81 deletions

View file

@@ -1,33 +1,17 @@
 import logging
+from contextlib import asynccontextmanager
 
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from decouple import config
 from fastapi import FastAPI
 
-from chalicelib.utils import pg_client
 from chalicelib.core import alerts_processor
+from chalicelib.utils import pg_client
 
-app = FastAPI(root_path="/alerts", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
-logging.info("============= ALERTS =============")
-
-
-@app.get("/")
-async def root():
-    return {"status": "Running"}
-
-
-loglevel = config("LOGLEVEL", default=logging.INFO)
-print(f">Loglevel set to: {loglevel}")
-logging.basicConfig(level=loglevel)
-ap_logger = logging.getLogger('apscheduler')
-ap_logger.setLevel(loglevel)
-
-app.schedule = AsyncIOScheduler()
-
-
-@app.on_event("startup")
-async def startup():
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    # Startup
+    app.schedule = AsyncIOScheduler()
     logging.info(">>>>> starting up <<<<<")
     await pg_client.init()
     app.schedule.start()
@@ -39,24 +23,44 @@ async def startup():
     for job in app.schedule.get_jobs():
         ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
+    # App listening
+    yield
 
-
-@app.on_event("shutdown")
-async def shutdown():
+    # Shutdown
     logging.info(">>>>> shutting down <<<<<")
     app.schedule.shutdown(wait=False)
     await pg_client.terminate()
 
 
-@app.get('/private/shutdown', tags=["private"])
-async def stop_server():
-    logging.info("Requested shutdown")
-    await shutdown()
-    import os, signal
-    os.kill(1, signal.SIGTERM)
+app = FastAPI(root_path="/alerts", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""),
+              lifespan=lifespan)
+logging.info("============= ALERTS =============")
+
+
+@app.get("/")
+async def root():
+    return {"status": "Running"}
+
+
+@app.get("/health")
+async def get_health_status():
+    return {"data": {
+        "health": True,
+        "details": {"version": config("version_number", default="unknown")}
+    }}
+
+
+app.schedule = AsyncIOScheduler()
+loglevel = config("LOGLEVEL", default=logging.INFO)
+print(f">Loglevel set to: {loglevel}")
+logging.basicConfig(level=loglevel)
+ap_logger = logging.getLogger('apscheduler')
+ap_logger.setLevel(loglevel)
+
+app.schedule = AsyncIOScheduler()
 
 if config("LOCAL_DEV", default=False, cast=bool):
-    @app.get('/private/trigger', tags=["private"])
+    @app.get('/trigger', tags=["private"])
     async def trigger_main_cron():
         logging.info("Triggering main cron")
         alerts_processor.process()
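
The diff above replaces the @app.on_event("startup") and @app.on_event("shutdown") hooks (now deprecated in favor of lifespan) with FastAPI's lifespan context manager, added in FastAPI 0.93: everything before the yield runs at startup, everything after it runs at shutdown, and the /private/shutdown kill endpoint is dropped. A minimal, self-contained sketch of the same pattern, with illustrative names rather than the project's real modules:

import logging
from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: create and start the scheduler before the app serves traffic.
    app.schedule = AsyncIOScheduler()
    app.schedule.start()
    logging.info(">>>>> starting up <<<<<")
    yield  # the application handles requests between startup and shutdown
    # Shutdown: stop the scheduler without waiting for running jobs.
    logging.info(">>>>> shutting down <<<<<")
    app.schedule.shutdown(wait=False)


app = FastAPI(lifespan=lifespan)


@app.get("/health")
async def get_health_status():
    # Same response shape as the new endpoint above, with a hard-coded version.
    return {"data": {"health": True, "details": {"version": "unknown"}}}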

View file

@@ -29,7 +29,7 @@ if config("LOCAL_DEV", cast=bool, default=False):
 else:
     HEALTH_ENDPOINTS = {
-        "alerts": "http://alerts-openreplay.app.svc.cluster.local:8888/metrics",
+        "alerts": "http://alerts-openreplay.app.svc.cluster.local:8888/health",
         "assets": "http://assets-openreplay.app.svc.cluster.local:8888/metrics",
         "assist": "http://assist-openreplay.app.svc.cluster.local:8888/health",
         "chalice": "http://chalice-openreplay.app.svc.cluster.local:8888/metrics",

View file

@@ -1,3 +1,3 @@
 #!/bin/sh
-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload --proxy-headers
+uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --proxy-headers

View file

@@ -1,3 +1,3 @@
 #!/bin/sh
 export ASSIST_KEY=ignore
-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload
+uvicorn app:app --host 0.0.0.0 --port 8888

View file

@@ -1,3 +1,3 @@
 #!/bin/zsh
-uvicorn app_alerts:app --reload
+uvicorn app_alerts:app --reload --port 8888
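
These script changes remove uvicorn's --reload flag from the container entrypoints (hot reload stays only in local dev runs) and pin the local alerts runner to port 8888. For reference, a rough programmatic equivalent of the same invocation (a sketch; the services are actually launched from the shell scripts, and the LISTEN_PORT fallback is an assumption):

import os

import uvicorn

if __name__ == "__main__":
    uvicorn.run(
        "app:app",                                  # "app_alerts:app" for the alerts service
        host="0.0.0.0",
        port=int(os.environ.get("LISTEN_PORT", "8888")),
        proxy_headers=True,                         # mirrors --proxy-headers
        reload=False,                               # --reload removed outside local dev
    )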

ee/api/.gitignore vendored
View file

@@ -265,6 +265,8 @@ Pipfile.lock
 /app_alerts.py
 /build_alerts.sh
 /build_crons.sh
+/run-dev.sh
+/run-alerts-dev.sh
 /routers/subs/health.py
 /routers/subs/v1_api.py
 #exp /chalicelib/core/dashboards.py

View file

@@ -1,5 +1,6 @@
 import logging
 import queue
+from contextlib import asynccontextmanager
 
 from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from decouple import config
@@ -10,9 +11,9 @@ from starlette import status
 from starlette.responses import StreamingResponse, JSONResponse
 
 from chalicelib.core import traces
-from chalicelib.utils import events_queue
 from chalicelib.utils import helper
 from chalicelib.utils import pg_client
+from chalicelib.utils import events_queue
 from routers import core, core_dynamic, ee, saml
 from routers.crons import core_crons
 from routers.crons import core_dynamic_crons
@@ -20,7 +21,43 @@ from routers.crons import ee_crons
 from routers.subs import insights, metrics, v1_api_ee
 from routers.subs import v1_api, health
 
-app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
+loglevel = config("LOGLEVEL", default=logging.INFO)
+print(f">Loglevel set to: {loglevel}")
+logging.basicConfig(level=loglevel)
+ap_logger = logging.getLogger('apscheduler')
+ap_logger.setLevel(loglevel)
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    # Startup
+    logging.info(">>>>> starting up <<<<<")
+    app.schedule = AsyncIOScheduler()
+    app.queue_system = queue.Queue()
+    await pg_client.init()
+    await events_queue.init()
+    app.schedule.start()
+    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.ee_cron_jobs:
+        app.schedule.add_job(id=job["func"].__name__, **job)
+    ap_logger.info(">Scheduled jobs:")
+    for job in app.schedule.get_jobs():
+        ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
+
+    # App listening
+    yield
+
+    # Shutdown
+    logging.info(">>>>> shutting down <<<<<")
+    app.schedule.shutdown(wait=True)
+    await traces.process_traces_queue()
+    await events_queue.terminate()
+    await pg_client.terminate()
+
+
+app = FastAPI(root_path="/api", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""),
+              lifespan=lifespan)
 
 app.add_middleware(GZipMiddleware, minimum_size=1000)
@@ -71,43 +108,3 @@ app.include_router(v1_api_ee.app_apikey)
 app.include_router(health.public_app)
 app.include_router(health.app)
 app.include_router(health.app_apikey)
-
-loglevel = config("LOGLEVEL", default=logging.INFO)
-print(f">Loglevel set to: {loglevel}")
-logging.basicConfig(level=loglevel)
-ap_logger = logging.getLogger('apscheduler')
-ap_logger.setLevel(loglevel)
-
-app.schedule = AsyncIOScheduler()
-app.queue_system = queue.Queue()
-
-
-@app.on_event("startup")
-async def startup():
-    logging.info(">>>>> starting up <<<<<")
-    await pg_client.init()
-    await events_queue.init()
-    app.schedule.start()
-    for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs + traces.cron_jobs + ee_crons.ee_cron_jobs:
-        app.schedule.add_job(id=job["func"].__name__, **job)
-    ap_logger.info(">Scheduled jobs:")
-    for job in app.schedule.get_jobs():
-        ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
-
-
-@app.on_event("shutdown")
-async def shutdown():
-    logging.info(">>>>> shutting down <<<<<")
-    app.schedule.shutdown(wait=True)
-    await traces.process_traces_queue()
-    await events_queue.terminate()
-    await pg_client.terminate()
-
-
-@app.get('/private/shutdown', tags=["private"])
-async def stop_server():
-    logging.info("Requested shutdown")
-    await shutdown()
-    import os, signal
-    os.kill(1, signal.SIGTERM)
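
Because the shutdown steps now sit after the yield in the same lifespan function, they are driven by the ASGI lifespan protocol and only run if startup completed; the manual /private/shutdown endpoint that killed PID 1 is gone. A hedged way to exercise both phases in a test is to use the test client as a context manager (the import path is hypothetical, and real tests would need the database and queue dependencies stubbed):

from fastapi.testclient import TestClient

from app import app  # hypothetical import path for the module above

# Entering the context manager drives lifespan startup (code before `yield`);
# leaving it drives shutdown (code after `yield`).
with TestClient(app) as client:
    assert hasattr(app, "schedule")  # set inside lifespan() during startup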

View file

@@ -86,3 +86,5 @@ rm -rf ./chalicelib/core/performance_event.py
 rm -rf ./chalicelib/core/saved_search.py
 rm -rf ./app_alerts.py
 rm -rf ./build_alerts.sh
+rm -rf ./run-dev.sh
+rm -rf ./run-alerts-dev.sh

View file

@@ -2,4 +2,4 @@
 sh env_vars.sh
 source /tmp/.env.override
-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload --proxy-headers
+uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --proxy-headers

View file

@@ -2,4 +2,4 @@
 export ASSIST_KEY=ignore
 sh env_vars.sh
 source /tmp/.env.override
-uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload
+uvicorn app:app --host 0.0.0.0 --port 8888

View file

@@ -1,3 +0,0 @@
-#!/bin/zsh
-uvicorn app:app --reload