feat(api): assist stats (#1488)

* feat(api): assist stats

* refactor(helm): Updating helm code

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(actions): build

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(actions): deploy image to ee also

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
Author: Shekar Siri · 2023-10-02 15:20:49 +02:00 · committed by GitHub
commit 8cd6b6216e (parent 80b7a5a006)
22 changed files with 1191 additions and 0 deletions

.github/workflows/assist-stats.yaml (new file, 185 lines added)
@@ -0,0 +1,185 @@
# This action builds and deploys the assist-stats service to AWS
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: 'Skip security checks if there is an unfixable vulnerability or error. Value: true/false'
required: false
default: 'false'
push:
branches:
- dev
paths:
- "assist-stats/**"
- "!assist-stats/.gitignore"
- "!assist-stats/*-dev.sh"
- "!assist-stats/requirements-*.txt"
name: Build and Deploy Assist Stats
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing assist-stats image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist-stats
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist-stats")
for image in ${images[*]};do
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("assist-stats")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Capture currently deployed image tags
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
# Update changed image tag
sed -i "/assist-stats/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,assist-stats,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,assist-stats,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
### Enterprise code deployment
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontextee
- name: Resetting vars file
run: |
git checkout -- scripts/helmcharts/vars.yaml
- name: Deploy to kubernetes ee
run: |
cd scripts/helmcharts/
cat <<EOF>/tmp/image_override.yaml
assist-stats:
image:
# We have to strip the -ee suffix here, as helm will append it.
tag: ${IMAGE_TAG}
EOF
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
# Update changed image tag
sed -i "/assist-stats/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,assist-stats,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,assist-stats,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing the -ee suffix, because helm will add it.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
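
For reference, the workflow_dispatch trigger above can also be invoked by hand; a minimal sketch with the GitHub CLI (assuming gh is installed and authenticated against this repository):

# Run the deploy manually from the dev branch, skipping the trivy scan
gh workflow run assist-stats.yaml --ref dev -f skip_security_checks=true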

assist-stats/.gitignore (new file, 176 lines added)
@@ -0,0 +1,176 @@
# Created by .ignore support plugin (hsz.mobi)
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff:
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/dictionaries
# Sensitive or high-churn files:
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.xml
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
# Gradle:
.idea/**/gradle.xml
.idea/**/libraries
# CMake
cmake-build-debug/
# Mongo Explorer plugin:
.idea/**/mongoSettings.xml
## File-based project format:
*.iws
## Plugin-specific files:
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
### Example user template template
### Example user template
# IntelliJ project files
.idea
*.iml
out
gen
### Python template
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
Pipfile.lock
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Translations
*.mo
*.pot
# Django stuff:
*.log
.static_storage/
.media/
local_settings.py
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv/*
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
vendor
logs*.txt
*.csv
*.p
SUBNETS.json
README/*
.local

assist-stats/Dockerfile (new file, 30 lines added)
@@ -0,0 +1,30 @@
FROM python:3.11-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="Shekar Sirikonda<sshekarsiri@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache build-base tini
ARG envarg
# Tini (installed above) is used as the container init process (see ENTRYPOINT below)
ENV SOURCE_MAP_VERSION=0.7.4 \
APP_NAME=assist_stats \
LISTEN_PORT=8000 \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg} \
GIT_SHA=$GIT_SHA
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt
COPY . .
RUN mv env.default .env
RUN adduser -u 1001 openreplay -D
USER 1001
ENTRYPOINT ["/sbin/tini", "--"]
CMD ./entrypoint.sh
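
A minimal sketch of building and running this image locally, assuming the bundled entrypoint.sh starts the service on LISTEN_PORT (8000) and that a Postgres instance matching the .env defaults is reachable; the tag assist-stats:local is an arbitrary placeholder:

# Build from the assist-stats directory
docker build --build-arg envarg=default-foss --build-arg GIT_SHA=$(git rev-parse --short HEAD) -t assist-stats:local .
# Run and expose the FastAPI port
docker run --rm -p 8000:8000 assist-stats:local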

@@ -0,0 +1,7 @@
# ignore .git and .cache folders
.git
.cache
**/build.sh
**/build_*.sh
**/*deploy.sh
Dockerfile*

assist-stats/Pipfile (new file, 11 lines added)
@@ -0,0 +1,11 @@
[[source]]
url = "https://pypi.org/simple"
verify_ssl = true
name = "pypi"
[packages]
[dev-packages]
[requires]
python_version = "3.11"

assist-stats/build.sh (new file, 85 lines added)
@@ -0,0 +1,85 @@
#!/bin/bash
# Script to build api module
# flags to accept:
# envarg: build for enterprise edition.
# Default will be OSS build.
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
# Helper function
exit_err() {
err_code=$1
if [[ $err_code != 0 ]]; then
exit $err_code
fi
}
app="assist_stats" # Set the app variable to "chalice"
environment=$1
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
envarg="default-foss"
chart="$app" # Use the app variable here
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit 1
}
return
}
[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
}
update_helm_release() {
[[ $ee == "true" ]] && return
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_api(){
destination="_api"
[[ $1 == "ee" ]] && {
destination="_api_ee"
}
cp -R ../api ../${destination}
cd ../${destination} || exit_err 100
tag=""
# Copy enterprise code
[[ $1 == "ee" ]] && {
cp -rf ../ee/api/* ./
envarg="default-ee"
tag="ee-"
}
mv Dockerfile.dockerignore .dockerignore
docker build -f ./Dockerfile --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/$app:${image_tag} .
cd ../api || exit_err 100
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$app:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/$app:${image_tag} ${DOCKER_REPO:-'local'}/$app:${tag}latest
docker push ${DOCKER_REPO:-'local'}/$app:${tag}latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/$app:${image_tag}
}
echo "api docker build completed"
}
check_prereq
build_api $environment
echo build_complete
if [[ $PATCH -eq 1 ]]; then
update_helm_release
fi
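
Going by the script's own usage comment and flags, a local invocation might look like the following sketch (the DOCKER_REPO and IMAGE_TAG values are placeholders):

# OSS build, image kept local
IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh
# Enterprise build, pushed to the registry
IMAGE_TAG=latest DOCKER_REPO=myDockerHubID PUSH_IMAGE=1 bash build.sh ee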

assist-stats/env.default (new file, 10 lines added)
@@ -0,0 +1,10 @@
pg_dbname=postgres
pg_host=127.0.0.1
pg_password=password
pg_port=5432
pg_user=postgres
POOL_SIZE=20
MAX_OVERFLOW=10
POOL_TIMEOUT=30
POOL_RECYCLE=3600
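
These keys are read through python-decouple, so for local development one option (a sketch, assuming a local Postgres with these credentials) is to copy the defaults into .env, as the Dockerfile does, and start the app:

cp env.default .env
pip install -r requirements.txt
python run.py        # serves on 127.0.0.1:8000 per run.py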

assist-stats/main.py (new file, 159 lines added)
@@ -0,0 +1,159 @@
import logging
from typing import Optional
from fastapi import FastAPI, HTTPException, Depends, Response
from sqlalchemy import Column, Integer, String, DateTime, update, create_engine, PrimaryKeyConstraint, UniqueConstraint
from sqlalchemy.orm import declarative_base
from pydantic import BaseModel, Field
from datetime import datetime
from decouple import config
from enum import Enum as PythonEnum
from sqlalchemy import Enum
from sqlalchemy import CheckConstraint
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.orm import sessionmaker, Session
pg_dbname = config("pg_dbname")
pg_host = config("pg_host")
pg_password = config("pg_password")
pg_port = config("pg_port")
pg_user = config("pg_user")
DATABASE_URL = f"postgresql://{pg_user}:{pg_password}@{pg_host}:{pg_port}/{pg_dbname}"
POOL_SIZE = config("POOL_SIZE", default=20, cast=int)
MAX_OVERFLOW = config("MAX_OVERFLOW", default=10, cast=int)
POOL_TIMEOUT = config("POOL_TIMEOUT", default=30, cast=int)
POOL_RECYCLE = config("POOL_RECYCLE", default=3600, cast=int)
app = FastAPI()
engine = create_engine(
DATABASE_URL,
pool_size=POOL_SIZE,
max_overflow=MAX_OVERFLOW,
echo=True,
)
SessionLocal = sessionmaker(
bind=engine,
expire_on_commit=False
)
Base = declarative_base()
@app.on_event("startup")
def startup_db_client():
Base.metadata.create_all(bind=engine)
@app.on_event("shutdown")
def shutdown_db_client():
engine.dispose()
class EventStateEnum(PythonEnum):
start = "start"
end = "end"
class EventTypeEnum(PythonEnum):
live = "live"
assist = "assist"
call = "call"
remote = "remote"
record = "record"
class Event(Base):
__tablename__ = "assist_events"
session_id = Column(String, index=True)
user_uuid = Column(String, nullable=False)
event_type = Column(Enum(EventTypeEnum), nullable=False)
event_state = Column(Enum(EventStateEnum), nullable=False)
timestamp = Column(Integer, nullable=True)
# end_ts = Column(Integer, nullable=True)
user_id = Column(String, nullable=True)
agent_id = Column(String, nullable=True)
# duration = Column(Integer, nullable=True)
created_at = Column(DateTime, default=datetime.now)
__table_args__ = (
PrimaryKeyConstraint('session_id', 'agent_id', 'event_type', 'timestamp', name='pk_session_user_event'),
# UniqueConstraint('session_id', 'user_uuid', 'event_type', 'agent_id', name='uq_session_user_event_agent'),
CheckConstraint(
event_type.in_(['live', 'assist', 'call', 'remote', 'record']),
name='event_type_check'
),
CheckConstraint(
event_state.in_(['start', 'end']),
name='event_state_check'
),
)
class EventCreate(BaseModel):
session_id: str = Field(..., description="The session ID of the event")
user_uuid: str = Field(..., description="The UUID of the user")
event_type: EventTypeEnum = Field(..., description="The type of event")
event_state: EventStateEnum = Field(..., description="The state of the event")
user_id: Optional[str] = Field(None, description="The ID of the user")
agent_id: str = Field(..., description="The ID of the agent")
timestamp: int = Field(..., description="The timestamp of the event")
# end_ts: Optional[int] = Field(default=None, description="The timestamp of the end of the event")
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
@app.post("/events", response_model=EventCreate)
async def create_event_start(event: EventCreate, db: Session = Depends(get_db)):
db_event = Event(
session_id=event.session_id,
user_uuid=event.user_uuid,
event_type=event.event_type,
event_state=event.event_state,
user_id=event.user_id,
agent_id=event.agent_id,
timestamp=event.timestamp,
)
try:
db.add(db_event)
db.commit()
db.refresh(db_event)
return Response(status_code=200)
except SQLAlchemyError as e:
logging.error(f"Error creating event -: {e}")
if "unique constraint" in str(e):
raise HTTPException(status_code=409, detail=str("Event already exists"))
raise HTTPException(status_code=500, detail=str(e))
except Exception as e:
logging.error(f"Error creating event: {e}")
raise HTTPException(status_code=500, detail=str(e))
async def calculate_total_event_duration(event_type: EventTypeEnum, db: Session = Depends(get_db)):
events = db.query(Event).filter(Event.event_type == event_type).order_by(Event.timestamp).all()
total_duration = 0
start_time = None
for event in events:
if event.event_state == EventStateEnum.start:
start_time = event.timestamp
elif event.event_state == EventStateEnum.end and start_time is not None:
total_duration += event.timestamp - start_time
start_time = None
return total_duration
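
With the service running, the /events endpoint accepts the EventCreate payload defined above; a hypothetical request (all values are illustrative) could look like:

curl -X POST http://127.0.0.1:8000/events \
  -H 'Content-Type: application/json' \
  -d '{"session_id": "sess-1", "user_uuid": "u-123", "event_type": "call", "event_state": "start", "agent_id": "agent-7", "timestamp": 1696252800000}'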

@@ -0,0 +1,61 @@
annotated-types==0.5.0
anyio==3.7.1
APScheduler==3.10.4
asyncpg==0.28.0
azure-core==1.29.3
azure-storage-blob==12.16.0
boto3==1.28.42
botocore==1.31.55
certifi==2023.5.7
cffi==1.15.1
charset-normalizer==3.1.0
click==8.1.3
clickhouse-cityhash==1.0.2.4
clickhouse-driver==0.2.6
cryptography==41.0.3
defusedxml==0.7.1
dnspython==2.3.0
elastic-transport==8.4.0
elasticsearch==8.9.0
email-validator==2.0.0.post2
fastapi==0.103.1
gunicorn==20.1.0
h11==0.14.0
httptools==0.5.0
idna==3.4
isodate==0.6.1
jira==3.5.2
jmespath==1.0.1
lz4==4.3.2
oauthlib==3.2.2
packaging==23.1
psycopg2-binary==2.9.7
pycparser==2.21
pydantic==2.3.0
pydantic_core==2.6.3
PyJWT==2.8.0
python-dateutil==2.8.2
python-decouple==3.8
python-dotenv==1.0.0
python-multipart==0.0.6
pytz==2023.3
PyYAML==6.0
redis==5.0.0
requests==2.31.0
requests-oauthlib==1.3.1
requests-toolbelt==1.0.0
s3transfer==0.6.1
sentry-sdk==1.17.0
six==1.16.0
sniffio==1.3.0
SQLAlchemy==2.0.21
starlette==0.27.0
stripe==5.3.0
tenacity==8.2.3
typing_extensions==4.6.3
tzlocal==5.0.1
urllib3==1.26.16
uvicorn==0.23.2
uvloop==0.17.0
watchfiles==0.19.0
websockets==11.0.3

assist-stats/run.py (new file, 5 lines added)
@@ -0,0 +1,5 @@
import uvicorn
from decouple import config
if __name__ == "__main__":
uvicorn.run("main:app", host="127.0.0.1", port=8000, reload=True)

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -0,0 +1,24 @@
apiVersion: v2
name: assist-stats
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.7
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.14.0"

@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "assist-stats.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "assist-stats.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "assist-stats.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.ports.http }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "assist-stats.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "assist-stats.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "assist-stats.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "assist-stats.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "assist-stats.labels" -}}
helm.sh/chart: {{ include "assist-stats.chart" . }}
{{ include "assist-stats.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "assist-stats.selectorLabels" -}}
app.kubernetes.io/name: {{ include "assist-stats.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "assist-stats.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "assist-stats.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

@@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "assist-stats.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-stats.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "assist-stats.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "assist-stats.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "assist-stats.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
shareProcessNamespace: true
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- if .Values.global.enterpriseEditionLicense }}
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee"
{{- else }}
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.healthCheck}}
{{- .Values.healthCheck | toYaml | nindent 10}}
{{- end}}
env:
- name: version_number
value: '{{ .Chart.AppVersion }}'
- name: pg_host
value: '{{ .Values.global.postgresql.postgresqlHost }}'
- name: pg_port
value: '{{ .Values.global.postgresql.postgresqlPort }}'
- name: pg_dbname
value: "{{ .Values.global.postgresql.postgresqlDatabase }}"
- name: pg_user
value: '{{ .Values.global.postgresql.postgresqlUser }}'
- name: pg_password
{{- if .Values.global.postgresql.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ .Values.global.postgresql.existingSecret }}
key: postgresql-postgres-password
{{- else }}
value: '{{ .Values.global.postgresql.postgresqlPassword }}'
{{- end}}
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
{{- range $key, $val := .Values.service.ports }}
- name: {{ $key }}
containerPort: {{ $val }}
protocol: TCP
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

@@ -0,0 +1,29 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "assist-stats.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-stats.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "assist-stats.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

@@ -0,0 +1,38 @@
{{- if .Values.ingress.enabled }}
{{- $fullName := include "assist-stats.fullname" . -}}
{{- $svcPort := .Values.service.ports.http -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-stats.labels" . | nindent 4 }}
annotations:
nginx.ingress.kubernetes.io/configuration-snippet: |
add_header Cache-Control "no-store,no-cache";
add_header Pragma "no-cache";
nginx.ingress.kubernetes.io/rewrite-target: /$1
{{- with .Values.ingress.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
ingressClassName: "{{ tpl .Values.ingress.className . }}"
tls:
- hosts:
- {{ .Values.global.domainName }}
{{- if .Values.ingress.tls.secretName}}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end}}
rules:
- host: {{ .Values.global.domainName }}
http:
paths:
- pathType: Prefix
backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
path: /api/(.*)
{{- end }}

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "assist-stats.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-stats.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
{{- range $key, $val := .Values.service.ports }}
- port: {{ $val }}
targetPort: {{ $key }}
protocol: TCP
name: {{ $key }}
{{- end}}
selector:
{{- include "assist-stats.selectorLabels" . | nindent 4 }}

@@ -0,0 +1,18 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "assist-stats.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-stats.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
endpoints:
{{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }}
selector:
matchLabels:
{{- include "assist-stats.selectorLabels" . | nindent 6 }}
{{- end }}

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "assist-stats.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-stats.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "assist-stats.fullname" . }}-test-connection"
labels:
{{- include "assist-stats.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "assist-stats.fullname" . }}:{{ .Values.service.ports.http }}']
restartPolicy: Never

@@ -0,0 +1,112 @@
# Default values for assist-stats.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: "{{ .Values.global.openReplayContainerRegistry }}/assist-stats"
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: "assist-stats"
fullnameOverride: "assist-stats-openreplay"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
securityContext:
runAsUser: 1001
runAsGroup: 1001
podSecurityContext:
runAsUser: 1001
runAsGroup: 1001
fsGroup: 1001
fsGroupChangePolicy: "OnRootMismatch"
# podSecurityContext: {}
# fsGroup: 2000
# securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
ports:
http: 8000
metrics: 8888
serviceMonitor:
enabled: false
additionalLabels:
release: observability
scrapeConfigs:
- port: metrics
honorLabels: true
interval: 15s
path: /metrics
scheme: http
scrapeTimeout: 10s
ingress:
enabled: false
className: "{{ .Values.global.ingress.controller.ingressClassResource.name }}"
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
secretName: openreplay-ssl
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
env:
# Enable logging for python app
# Ref: https://stackoverflow.com/questions/43969743/logs-in-kubernetes-pod-not-showing-up
PYTHONUNBUFFERED: '0'
nodeSelector: {}
tolerations: []
affinity: {}
healthCheck:
livenessProbe:
httpGet:
path: /
port: 8000
initialDelaySeconds: 120
periodSeconds: 30
timeoutSeconds: 15
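
When deploying through the parent openreplay chart as the workflow does, these defaults can be overridden from an extra values file; a sketch (file name and toggles chosen for illustration) that enables the optional ServiceMonitor and pins the image tag:

cat <<EOF > assist-stats-overrides.yaml
assist-stats:
  serviceMonitor:
    enabled: true
  image:
    tag: dev_$(git rev-parse --short HEAD)
EOF
helm template openreplay -n app openreplay -f vars.yaml -f assist-stats-overrides.yaml --no-hooks | kubectl apply -n app -f -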