Dev (#2523)
* fix(chalice): fixed Math-operators validation
* refactor(chalice): search for sessions that have events for heatmaps
* refactor(chalice): search for sessions that have at least 1 location event for heatmaps
* feat(chalice): autocomplete returns top 10 with stats
* fix(chalice): fixed autocomplete top-10 meta-filters
* fix(alerts): fixed loggers
* refactor(chalice): refactored health-check logging
* refactor(chalice): upgraded dependencies
* refactor(alerts): upgraded dependencies
* refactor(crons): upgraded dependencies
Parent: 3131f58afd
Commit: 1f1bc273d0
13 changed files with 114 additions and 108 deletions
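Most of the non-dependency hunks below make the same change: print() calls become calls on a named, module-level logger. A minimal sketch of the pattern the refactored modules converge on (the function and URL here are illustrative, not from the diff):

import logging

import requests

# Each refactored module now defines a named logger near its imports.
logger = logging.getLogger(__name__)


def check_service(url: str) -> bool:
    """Illustrative health probe showing the print() -> logger migration."""
    try:
        response = requests.get(url, timeout=2)
    except requests.exceptions.Timeout:
        # Was: print(f"!! Timeout getting {url}")
        logger.error(f"!! Timeout getting {url}")
        return False
    except Exception as e:
        # logger.exception() records the message *and* the traceback,
        # which the bare print(str(e)) calls it replaces could not do.
        logger.exception(e)
        return False
    return response.status_code == 200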
api/Pipfile (12 changes)
@@ -6,18 +6,18 @@ name = "pypi"
 [packages]
 urllib3 = "==1.26.16"
 requests = "==2.32.3"
-boto3 = "==1.34.158"
+boto3 = "==1.35.10"
 pyjwt = "==2.9.0"
 psycopg2-binary = "==2.9.9"
-elasticsearch = "==8.14.0"
+elasticsearch = "==8.15.0"
 jira = "==3.8.0"
-fastapi = "==0.112.0"
+fastapi = "==0.112.2"
 python-decouple = "==3.8"
 apscheduler = "==3.10.4"
 redis = "==5.1.0b6"
-cachetools = "==5.4.0"
-psycopg = {extras = ["pool", "binary"], version = "==3.2.1"}
-uvicorn = {extras = ["standard"], version = "==0.30.5"}
+cachetools = "==5.5.0"
+psycopg = {extras = ["binary", "pool"], version = "==3.2.1"}
+uvicorn = {extras = ["standard"], version = "==0.30.6"}
 pydantic = {extras = ["email"], version = "==2.8.2"}

 [dev-packages]
@@ -1,7 +1,6 @@
 import decimal
 import logging

-from decouple import config
 from pydantic_core._pydantic_core import ValidationError

 import schemas
@@ -11,8 +10,7 @@ from chalicelib.core import sessions
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC

-logging.basicConfig(level=config("LOGLEVEL", default=logger.info))
-
+logger = logging.getLogger(__name__)
 LeftToDb = {
     schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
         "table": "events.pages INNER JOIN public.sessions USING(session_id)",
@@ -1,4 +1,4 @@
-from urllib.parse import urlparse
+import logging

 import redis
 import requests
@@ -7,11 +7,13 @@ from decouple import config
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC

+logger = logging.getLogger(__name__)
+

 def app_connection_string(name, port, path):
     namespace = config("POD_NAMESPACE", default="app")
     conn_string = config("CLUSTER_URL", default="svc.cluster.local")
-    return f"http://{'.'.join(filter(None, [name, namespace, conn_string]))}:{port}/{path}"
+    return f"http://{name}.{namespace}.{conn_string}:{port}/{path}"


 HEALTH_ENDPOINTS = {
@@ -44,17 +46,17 @@ def __check_database_pg(*_):
     with pg_client.PostgresClient() as cur:
         try:
             cur.execute("SHOW server_version;")
-            server_version = cur.fetchone()
+            # server_version = cur.fetchone()
         except Exception as e:
-            print("!! health failed: postgres not responding")
-            print(str(e))
+            logger.error("!! health failed: postgres not responding")
+            logger.exception(e)
             return fail_response
         try:
            cur.execute("SELECT openreplay_version() AS version;")
-            schema_version = cur.fetchone()
+            # schema_version = cur.fetchone()
         except Exception as e:
-            print("!! health failed: openreplay_version not defined")
-            print(str(e))
+            logger.error("!! health failed: openreplay_version not defined")
+            logger.exception(e)
             return fail_response
     return {
         "health": True,
@@ -87,22 +89,22 @@ def __check_be_service(service_name):
     try:
         results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
         if results.status_code != 200:
-            print(f"!! issue with the {service_name}-health code:{results.status_code}")
-            print(results.text)
+            logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
+            logger.error(results.text)
             # fail_response["details"]["errors"].append(results.text)
             return fail_response
     except requests.exceptions.Timeout:
-        print(f"!! Timeout getting {service_name}-health")
+        logger.error(f"!! Timeout getting {service_name}-health")
         # fail_response["details"]["errors"].append("timeout")
         return fail_response
     except Exception as e:
-        print(f"!! Issue getting {service_name}-health response")
-        print(str(e))
+        logger.error(f"!! Issue getting {service_name}-health response")
+        logger.exception(e)
         try:
-            print(results.text)
+            logger.error(results.text)
             # fail_response["details"]["errors"].append(results.text)
         except Exception:
-            print("couldn't get response")
+            logger.error("couldn't get response")
         # fail_response["details"]["errors"].append(str(e))
         return fail_response
     return {
@@ -123,11 +125,11 @@ def __check_redis(*_):
         return fail_response

     try:
-        r = redis.from_url(config("REDIS_STRING"))
+        r = redis.from_url(config("REDIS_STRING"), socket_timeout=2)
         r.ping()
     except Exception as e:
-        print("!! Issue getting redis-health response")
-        print(str(e))
+        logger.error("!! Issue getting redis-health response")
+        logger.exception(e)
         # fail_response["details"]["errors"].append(str(e))
         return fail_response

@@ -149,8 +151,8 @@ def __check_SSL(*_):
     try:
         requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
     except Exception as e:
-        print("!! health failed: SSL Certificate")
-        print(str(e))
+        logger.error("!! health failed: SSL Certificate")
+        logger.exception(e)
         return fail_response
     return {
         "health": True,
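One behavioral note on the health-module hunk above: the old app_connection_string joined only non-empty components, so an empty POD_NAMESPACE or CLUSTER_URL simply dropped out of the hostname, while the new plain f-string always emits both separators. A side-by-side sketch of the two behaviors (the _old/_new suffixes are mine, for comparison):

from decouple import config


def app_connection_string_old(name, port, path):
    namespace = config("POD_NAMESPACE", default="app")
    conn_string = config("CLUSTER_URL", default="svc.cluster.local")
    # filter(None, ...) drops empty components before joining.
    return f"http://{'.'.join(filter(None, [name, namespace, conn_string]))}:{port}/{path}"


def app_connection_string_new(name, port, path):
    namespace = config("POD_NAMESPACE", default="app")
    conn_string = config("CLUSTER_URL", default="svc.cluster.local")
    # Simpler, but assumes both settings are always non-empty strings.
    return f"http://{name}.{namespace}.{conn_string}:{port}/{path}"

With the defaults shown, the two are equivalent; they diverge only when an operator sets either variable to an empty string.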
@@ -1,17 +1,17 @@
 # Keep this version to not have conflicts between requests and boto3
 urllib3==1.26.16
 requests==2.32.3
-boto3==1.34.158
+boto3==1.35.10
 pyjwt==2.9.0
 psycopg2-binary==2.9.9
 psycopg[pool,binary]==3.2.1
-elasticsearch==8.14.0
+elasticsearch==8.15.0
 jira==3.8.0



-fastapi==0.112.0
-uvicorn[standard]==0.30.5
+fastapi==0.112.2
+uvicorn[standard]==0.30.6
 python-decouple==3.8
 pydantic[email]==2.8.2
 apscheduler==3.10.4
@@ -1,18 +1,18 @@
 # Keep this version to not have conflicts between requests and boto3
 urllib3==1.26.16
 requests==2.32.3
-boto3==1.34.158
+boto3==1.35.10
 pyjwt==2.9.0
 psycopg2-binary==2.9.9
 psycopg[pool,binary]==3.2.1
-elasticsearch==8.14.0
+elasticsearch==8.15.0
 jira==3.8.0
-cachetools==5.4.0
+cachetools==5.5.0



-fastapi==0.112.0
-uvicorn[standard]==0.30.5
+fastapi==0.112.2
+uvicorn[standard]==0.30.6
 python-decouple==3.8
 pydantic[email]==2.8.2
 apscheduler==3.10.4
@@ -6,23 +6,23 @@ name = "pypi"
 [packages]
 urllib3 = "==1.26.16"
 requests = "==2.32.3"
-boto3 = "==1.34.158"
+boto3 = "==1.35.10"
 pyjwt = "==2.9.0"
 psycopg2-binary = "==2.9.9"
-elasticsearch = "==8.14.0"
+elasticsearch = "==8.15.0"
 jira = "==3.8.0"
-fastapi = "==0.112.0"
-gunicorn = "==22.0.0"
+fastapi = "==0.112.2"
+gunicorn = "==23.0.0"
 python-decouple = "==3.8"
 apscheduler = "==3.10.4"
 python3-saml = "==1.16.0"
 redis = "==5.1.0b6"
 azure-storage-blob = "==12.22.0"
-cachetools = "==5.4.0"
-psycopg = {extras = ["binary", "pool"], version = "==3.2.1"}
-uvicorn = {extras = ["standard"], version = "==0.30.5"}
+cachetools = "==5.5.0"
+psycopg = {extras = ["pool", "binary"], version = "==3.2.1"}
+uvicorn = {extras = ["standard"], version = "==0.30.6"}
 pydantic = {extras = ["email"], version = "==2.8.2"}
-clickhouse-driver = {extras = ["lz4"], version = "==0.2.8"}
+clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}

 [dev-packages]
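Note that the psycopg entries in the two Pipfiles above only swap the order of their extras (["pool", "binary"] vs ["binary", "pool"]); extras are treated as a set during resolution, so these hunks should be cosmetic and pin the same wheels.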
@@ -1,9 +1,11 @@
-print("============= CRONS =============")
 import asyncio
+import logging
 import sys

 from crons import core_dynamic_crons, ee_crons

+logger = logging.getLogger(__name__)
+
 ACTIONS = {
     "TELEMETRY": core_dynamic_crons.telemetry_cron,
     "JOB": core_dynamic_crons.run_scheduled_jobs,
@@ -16,9 +18,9 @@ ACTIONS = {

 def default_action(action):
     async def _func():
-        print(f"{action} not found in crons-definitions")
-        print("possible actions:")
-        print(list(ACTIONS.keys()))
+        logger.warning(f"{action} not found in crons-definitions")
+        logger.warning("possible actions:")
+        logger.warning(list(ACTIONS.keys()))

     return _func

@@ -29,8 +31,8 @@ async def process(action):

 if __name__ == '__main__':
     if len(sys.argv) < 2 or len(sys.argv[1]) < 1:
-        print("please provide actions as argument\npossible actions:")
-        print(list(ACTIONS.keys()))
+        logger.warning("please provide actions as argument\npossible actions:")
+        logger.warning(list(ACTIONS.keys()))
     else:
-        print(f"action: {sys.argv[1]}")
+        logger.info(f"action: {sys.argv[1]}")
         asyncio.run(process(sys.argv[1]))
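The cron entry point above keeps its dispatch shape: look the CLI argument up in ACTIONS and fall back to a coroutine factory that logs the known keys. A condensed, runnable sketch of that shape, with a stub action standing in for the real crons; the .get(...) lookup in process() is an assumption (the diff does not show its body), and the basicConfig line is mine, since the script itself swaps print() for logger.* without configuring a handler:

import asyncio
import logging
import sys

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


async def telemetry_cron() -> None:
    # Stand-in for core_dynamic_crons.telemetry_cron.
    logger.info("telemetry tick")


ACTIONS = {"TELEMETRY": telemetry_cron}


def default_action(action):
    # Coroutine factory: returns an async no-op that only logs guidance.
    async def _func():
        logger.warning(f"{action} not found in crons-definitions")
        logger.warning("possible actions:")
        logger.warning(list(ACTIONS.keys()))

    return _func


async def process(action):
    # Assumed lookup; the hunk only shows this function's signature.
    await ACTIONS.get(action, default_action(action))()


if __name__ == '__main__':
    if len(sys.argv) < 2 or len(sys.argv[1]) < 1:
        logger.warning("please provide actions as argument\npossible actions:")
        logger.warning(list(ACTIONS.keys()))
    else:
        logger.info(f"action: {sys.argv[1]}")
        asyncio.run(process(sys.argv[1]))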
@@ -1,6 +1,5 @@
 import logging

-from decouple import config
 from pydantic_core._pydantic_core import ValidationError

 import schemas
@@ -10,7 +9,7 @@ from chalicelib.core import sessions_exp as sessions
 from chalicelib.utils import pg_client, ch_client, exp_ch_helper
 from chalicelib.utils.TimeUTC import TimeUTC

-logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
 logger = logging.getLogger(__name__)
+
 LeftToDb = {
     schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
@@ -128,8 +127,8 @@ def Build(a):
     try:
         data = schemas.SessionsSearchPayloadSchema.model_validate(a["filter"])
     except ValidationError:
-        logging.warning("Validation error for:")
-        logging.warning(a["filter"])
+        logger.warning("Validation error for:")
+        logger.warning(a["filter"])
         raise

     full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
@@ -211,24 +210,24 @@ def process():
             try:
                 query = ch_cur.format(query, params)
             except Exception as e:
-                logging.error(
+                logger.error(
                     f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
-                logging.error(e)
+                logger.error(e)
                 continue
-            logging.debug(alert)
-            logging.debug(query)
+            logger.debug(alert)
+            logger.debug(query)
             try:
                 result = ch_cur.execute(query)
                 if len(result) > 0:
                     result = result[0]

                 if result["valid"]:
-                    logging.info("Valid alert, notifying users")
+                    logger.info("Valid alert, notifying users")
                     notifications.append(alerts_processor.generate_notification(alert, result))
             except Exception as e:
-                logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
-                logging.error(str(e))
-                logging.error(query)
+                logger.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
+                logger.error(str(e))
+                logger.error(query)
     if len(notifications) > 0:
         cur.execute(
             cur.mogrify(f"""UPDATE public.alerts
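Behind the logging.* -> logger.* hunks above: the module-level functions logging.warning() and logging.error() go through the root logger (and implicitly call basicConfig() when the root logger has no handlers), so the records carry the name "root" and per-module log levels never apply. Routing through logging.getLogger(__name__) is what these hunks fix; a two-line illustration:

import logging

logger = logging.getLogger(__name__)

logging.warning("logged via the root logger; record name is 'root'")
logger.warning("logged via the module logger; record name is '%s'", __name__)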
@@ -1,4 +1,4 @@
-from urllib.parse import urlparse
+import logging

 import redis
 import requests
@@ -8,6 +8,8 @@ from decouple import config
 from chalicelib.utils import pg_client, ch_client
 from chalicelib.utils.TimeUTC import TimeUTC

+logger = logging.getLogger(__name__)
+

 def app_connection_string(name, port, path):
     namespace = config("POD_NAMESPACE", default="app")
@@ -45,17 +47,17 @@ def __check_database_pg(*_):
     with pg_client.PostgresClient() as cur:
         try:
             cur.execute("SHOW server_version;")
-            server_version = cur.fetchone()
+            # server_version = cur.fetchone()
         except Exception as e:
-            print("!! health failed: postgres not responding")
-            print(str(e))
+            logger.error("!! health failed: postgres not responding")
+            logger.exception(e)
             return fail_response
         try:
             cur.execute("SELECT openreplay_version() AS version;")
-            schema_version = cur.fetchone()
+            # schema_version = cur.fetchone()
         except Exception as e:
-            print("!! health failed: openreplay_version not defined")
-            print(str(e))
+            logger.error("!! health failed: openreplay_version not defined")
+            logger.exception(e)
             return fail_response
     return {
         "health": True,
@@ -88,22 +90,22 @@ def __check_be_service(service_name):
     try:
         results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
         if results.status_code != 200:
-            print(f"!! issue with the {service_name}-health code:{results.status_code}")
-            print(results.text)
+            logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
+            logger.error(results.text)
             # fail_response["details"]["errors"].append(results.text)
             return fail_response
     except requests.exceptions.Timeout:
-        print(f"!! Timeout getting {service_name}-health")
+        logger.error(f"!! Timeout getting {service_name}-health")
         # fail_response["details"]["errors"].append("timeout")
         return fail_response
     except Exception as e:
-        print(f"!! Issue getting {service_name}-health response")
-        print(str(e))
+        logger.error(f"!! Issue getting {service_name}-health response")
+        logger.exception(e)
         try:
-            print(results.text)
+            logger.error(results.text)
             # fail_response["details"]["errors"].append(results.text)
-        except:
-            print("couldn't get response")
+        except Exception:
+            logger.error("couldn't get response")
         # fail_response["details"]["errors"].append(str(e))
         return fail_response
     return {
@@ -124,12 +126,11 @@ def __check_redis(*_):
         return fail_response

     try:
-        u = urlparse(config("REDIS_STRING"))
-        r = redis.Redis(host=u.hostname, port=u.port, socket_timeout=2)
+        r = redis.from_url(config("REDIS_STRING"), socket_timeout=2)
         r.ping()
     except Exception as e:
-        print("!! Issue getting redis-health response")
-        print(str(e))
+        logger.error("!! Issue getting redis-health response")
+        logger.exception(e)
         # fail_response["details"]["errors"].append(str(e))
         return fail_response

@@ -151,8 +152,8 @@ def __check_SSL(*_):
     try:
         requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
     except Exception as e:
-        print("!! health failed: SSL Certificate")
-        print(str(e))
+        logger.error("!! health failed: SSL Certificate")
+        logger.exception(e)
         return fail_response
     return {
         "health": True,
@@ -347,8 +348,8 @@ def __check_database_ch(*_):
     try:
         server_version = ch.execute("SELECT version() AS server_version;")
     except Exception as e:
-        print("!! health failed: clickhouse not responding")
-        print(str(e))
+        logger.error("!! health failed: clickhouse not responding")
+        logger.exception(e)
         return fail_response

     schema_version = ch.execute("""SELECT 1
@@ -358,7 +359,7 @@ def __check_database_ch(*_):
         schema_version = ch.execute("SELECT openreplay_version() AS version;")
         schema_version = schema_version[0]["version"]
     else:
-        print("!! health failed: clickhouse schema is outdated")
+        logger.error("!! health failed: clickhouse schema is outdated")
         schema_version = "unknown"
         # fail_response["details"]["errors"].append("clickhouse schema is outdated")
         return fail_response
@@ -387,8 +388,8 @@ def __check_database_ch(*_):
     # raise Exception('topics not found')
     #
     # except Exception as e:
-    #     print("!! Issue getting kafka-health response")
-    #     print(str(e))
+    #     logger.error("!! Issue getting kafka-health response")
+    #     logger.exception(e)
     #     fail_response["details"]["errors"].append(str(e))
     #     return fail_response
     #
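Both health modules now build the Redis client the same way. redis.from_url() forwards keyword arguments to the Redis constructor, so socket_timeout=2 bounds each socket operation and ping() fails fast instead of hanging the health endpoint; it also drops the hand-rolled urlparse() construction, which ignored any password or database encoded in REDIS_STRING. A standalone sketch (the localhost URL is a placeholder):

import redis


def check_redis(redis_url: str = "redis://localhost:6379") -> bool:
    try:
        # socket_timeout applies per socket operation, so ping()
        # raises redis.TimeoutError quickly instead of blocking.
        r = redis.from_url(redis_url, socket_timeout=2)
        r.ping()
    except Exception:
        return False
    return True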
@@ -1,3 +1,5 @@
+import logging
+
 from apscheduler.triggers.cron import CronTrigger
 from apscheduler.triggers.interval import IntervalTrigger
 from decouple import config
@@ -6,6 +8,8 @@ from chalicelib.core import jobs
 from chalicelib.core import telemetry, unlock
 from chalicelib.core import weekly_report as weekly_report_script, health

+logger = logging.getLogger(__name__)
+

 async def run_scheduled_jobs() -> None:
     jobs.execute_jobs()
@@ -20,9 +24,9 @@ async def telemetry_cron() -> None:


 async def unlock_cron() -> None:
-    print("validating license")
+    logger.info("validating license")
     unlock.check()
-    print(f"valid: {unlock.is_valid()}")
+    logger.info(f"valid: {unlock.is_valid()}")


 async def health_cron() -> None:
@@ -1,20 +1,20 @@
 # Keep this version to not have conflicts between requests and boto3
 urllib3==1.26.16
 requests==2.32.3
-boto3==1.34.158
+boto3==1.35.10
 pyjwt==2.9.0
 psycopg2-binary==2.9.9
 psycopg[pool,binary]==3.2.1
-elasticsearch==8.14.0
+elasticsearch==8.15.0
 jira==3.8.0



-fastapi==0.112.0
-uvicorn[standard]==0.30.5
+fastapi==0.112.2
+uvicorn[standard]==0.30.6
 python-decouple==3.8
 pydantic[email]==2.8.2
 apscheduler==3.10.4

-clickhouse-driver[lz4]==0.2.8
+clickhouse-driver[lz4]==0.2.9
 azure-storage-blob==12.22.0
@@ -1,20 +1,20 @@
 # Keep this version to not have conflicts between requests and boto3
 urllib3==1.26.16
 requests==2.32.3
-boto3==1.34.158
+boto3==1.35.10
 pyjwt==2.9.0
 psycopg2-binary==2.9.9
 psycopg[pool,binary]==3.2.1
-elasticsearch==8.14.0
+elasticsearch==8.15.0
 jira==3.8.0



-fastapi==0.112.0
+fastapi==0.112.2
 python-decouple==3.8
 pydantic[email]==2.8.2
 apscheduler==3.10.4

-clickhouse-driver[lz4]==0.2.8
+clickhouse-driver[lz4]==0.2.9
 redis==5.1.0b6
 azure-storage-blob==12.22.0
@@ -1,24 +1,24 @@
 # Keep this version to not have conflicts between requests and boto3
 urllib3==1.26.16
 requests==2.32.3
-boto3==1.34.158
+boto3==1.35.10
 pyjwt==2.9.0
 psycopg2-binary==2.9.9
 psycopg[pool,binary]==3.2.1
-elasticsearch==8.14.0
+elasticsearch==8.15.0
 jira==3.8.0
-cachetools==5.4.0
+cachetools==5.5.0



-fastapi==0.112.0
-uvicorn[standard]==0.30.5
-gunicorn==22.0.0
+fastapi==0.112.2
+uvicorn[standard]==0.30.6
+gunicorn==23.0.0
 python-decouple==3.8
 pydantic[email]==2.8.2
 apscheduler==3.10.4

-clickhouse-driver[lz4]==0.2.8
+clickhouse-driver[lz4]==0.2.9
 # TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
 # --no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
 python3-saml==1.16.0 --no-binary=lxml