Canvas archives (#1938)

* feat(api): added a second pre-signed URL per canvas

* feat(api): fixed the old key format

* feat(backend): draft version of the new approach for canvases

* feat(deploy): fixed some deployment issues

* feat(backend): added debug logs for the bash command

* feat(backend): removed a wrong parameter from the bash pipeline

* feat(backend): removed the canvas-maker service + small refactoring
Alexander, 2024-03-11 17:51:49 +01:00 (committed by GitHub)
parent 7ab5043d16, commit 89752b2acc
24 changed files with 93 additions and 811 deletions


@@ -13,17 +13,23 @@ def get_canvas_presigned_urls(session_id, project_id):
                         {"project_id": project_id, "session_id": session_id})
         )
         rows = cur.fetchall()
+    urls = []
     for i in range(len(rows)):
         params = {
             "sessionId": session_id,
             "projectId": project_id,
             "recordingId": rows[i]["recording_id"]
         }
-        key = config("CANVAS_PATTERN", default="%(sessionId)s/%(recordingId)s.mp4") % params
-        rows[i] = StorageClient.get_presigned_url_for_sharing(
+        oldKey = "%(sessionId)s/%(recordingId)s.mp4" % params
+        key = config("CANVAS_PATTERN", default="%(sessionId)s/%(recordingId)s.tar.zst") % params
+        urls.append(StorageClient.get_presigned_url_for_sharing(
             bucket=config("CANVAS_BUCKET", default=config("sessions_bucket")),
             expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
             key=key
-        )
-    return rows
+        ))
+        urls.append(StorageClient.get_presigned_url_for_sharing(
+            bucket=config("CANVAS_BUCKET", default=config("sessions_bucket")),
+            expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
+            key=oldKey
+        ))
+    return urls
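
Note: CANVAS_PATTERN uses Python %-style named placeholders expanded with the params dict, so the endpoint now signs two URLs per recording: one for the .tar.zst archive that canvas-handler uploads, and one for the legacy .mp4 key, presumably so replays rendered before this change stay reachable. A minimal sketch of the two key shapes (illustrative IDs, not part of the commit):

// Sketch: the two object keys signed per canvas recording; IDs are made up.
package main

import "fmt"

func main() {
	sessionID, recordingID := "7291", "42_1"
	newKey := fmt.Sprintf("%s/%s.tar.zst", sessionID, recordingID) // new CANVAS_PATTERN default
	oldKey := fmt.Sprintf("%s/%s.mp4", sessionID, recordingID)     // legacy key, still signed
	fmt.Println(newKey) // 7291/42_1.tar.zst
	fmt.Println(oldKey) // 7291/42_1.mp4
}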


@@ -5,7 +5,7 @@ ASSIST_JWT_SECRET=
 ASSIST_KEY=
 ASSIST_URL=http://assist-openreplay.app.svc.cluster.local:9001/assist/%s
 assistList=/sockets-list
-CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.mp4
+CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.tar.zst
 captcha_key=
 captcha_server=
 change_password_link=/reset-password?invitation=%s&&pass=%s


@@ -5,7 +5,7 @@ ASSIST_JWT_SECRET=secret
 ASSIST_KEY=abc
 ASSIST_URL=http://127.0.0.1:9001/assist/%s
 assistList=/sockets-list
-CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.mp4
+CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.tar.zst
 captcha_key=
 captcha_server=
 change_password_link=/changepassword?invitation=%s&&pass=%s


@@ -111,6 +111,8 @@ RUN if [ "$SERVICE_NAME" = "http" ]; then \
         wget https://static.openreplay.com/geoip/GeoLite2-City.mmdb -O "$MAXMINDDB_FILE"; \
     elif [ "$SERVICE_NAME" = "imagestorage" ]; then \
         apk add --no-cache zstd; \
+    elif [ "$SERVICE_NAME" = "canvas-handler" ]; then \
+        apk add --no-cache zstd; \
     elif [ "$SERVICE_NAME" = "canvas-maker" ]; then \
         apk add --no-cache ffmpeg; \
     fi
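
canvas-handler now shells out to sh, tar, and zstd at archive time, hence the new apk add branch above. A hedged sketch of a fail-fast startup check, not part of this PR:

// Sketch: verify the external binaries canvas-handler depends on are present.
// exec.LookPath searches PATH the same way the shell would.
package main

import (
	"log"
	"os/exec"
)

func main() {
	for _, bin := range []string{"sh", "tar", "zstd"} {
		if _, err := exec.LookPath(bin); err != nil {
			log.Fatalf("required binary %q not found in PATH: %v", bin, err)
		}
	}
	log.Println("all archive dependencies present")
}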


@@ -1,7 +1,6 @@
 package main

 import (
-	"fmt"
 	"log"
 	"os"
 	"os/signal"
@@ -13,6 +12,7 @@ import (
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
 	storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
+	"openreplay/backend/pkg/objectstorage/store"
 	"openreplay/backend/pkg/queue"
 )
@@ -24,48 +24,42 @@ func main() {
 	cfg := config.New()

-	srv, err := canvas_handler.New(cfg)
+	objStore, err := store.NewStore(&cfg.ObjectsConfig)
+	if err != nil {
+		log.Fatalf("can't init object storage: %s", err)
+	}
+	srv, err := canvas_handler.New(cfg, objStore)
 	if err != nil {
 		log.Printf("can't init storage service: %s", err)
 		return
 	}

-	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
 	canvasConsumer := queue.NewConsumer(
 		cfg.GroupCanvasImage,
 		[]string{
 			cfg.TopicCanvasImages,
 		},
 		messages.NewImagesMessageIterator(func(data []byte, sessID uint64) {
-			checkSessionEnd := func(data []byte) (messages.Message, error) {
+			isSessionEnd := func(data []byte) bool {
 				reader := messages.NewBytesReader(data)
 				msgType, err := reader.ReadUint()
 				if err != nil {
-					return nil, err
+					return false
 				}
 				if msgType != messages.MsgSessionEnd {
-					return nil, fmt.Errorf("not a session end message")
+					return false
 				}
-				msg, err := messages.ReadMessage(msgType, reader)
+				_, err = messages.ReadMessage(msgType, reader)
 				if err != nil {
-					return nil, fmt.Errorf("read message err: %s", err)
+					return false
 				}
-				return msg, nil
+				return true
 			}
-			if msg, err := checkSessionEnd(data); err == nil {
-				sessEnd := msg.(*messages.SessionEnd)
-				// Received session end
-				if list, err := srv.PrepareCanvasList(sessID); err != nil {
+			if isSessionEnd(data) {
+				if err := srv.PackSessionCanvases(sessID); err != nil {
 					log.Printf("can't prepare canvas: %s", err)
-				} else {
-					for _, name := range list {
-						sessEnd.EncryptionKey = name
-						if err := producer.Produce(cfg.TopicCanvasTrigger, sessID, sessEnd.Encode()); err != nil {
-							log.Printf("can't send session end signal to video service: %s", err)
-						}
-					}
 				}
 			} else {
 				if err := srv.SaveCanvasToDisk(sessID, data); err != nil {


@@ -1,95 +0,0 @@
package main
import (
"log"
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
config "openreplay/backend/internal/config/videostorage"
"openreplay/backend/internal/video-maker"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics"
storageMetrics "openreplay/backend/pkg/metrics/videostorage"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/queue"
)
func main() {
m := metrics.New()
m.Register(storageMetrics.List())
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := config.New()
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
log.Fatalf("can't init object storage: %s", err)
}
srv, err := video_maker.New(cfg, objStore)
if err != nil {
log.Printf("can't init storage service: %s", err)
return
}
workDir := cfg.FSDir
canvasConsumer := queue.NewConsumer(
cfg.GroupCanvasVideo,
[]string{
cfg.TopicCanvasTrigger,
},
messages.NewMessageIterator(
func(msg messages.Message) {
sesEnd := msg.(*messages.SessionEnd)
filePath := workDir + "/canvas/" + strconv.FormatUint(sesEnd.SessionID(), 10) + "/"
canvasMix := sesEnd.EncryptionKey // dirty hack to use encryption key as canvas mix holder (only between canvas handler and canvas maker)
if canvasMix == "" {
log.Printf("no canvas mix for session: %d", sesEnd.SessionID())
return
}
if err := srv.Process(sesEnd.SessionID(), filePath, canvasMix); err != nil {
if !strings.Contains(err.Error(), "no such file or directory") {
log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID())
}
}
},
[]int{messages.MsgSessionEnd},
true,
),
false,
cfg.MessageSizeLimit,
)
log.Printf("Canvas maker service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
counterTick := time.Tick(time.Second * 30)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
srv.Wait()
canvasConsumer.Close()
os.Exit(0)
case <-counterTick:
srv.Wait()
if err := canvasConsumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
case msg := <-canvasConsumer.Rebalanced():
log.Println(msg)
default:
err = canvasConsumer.ConsumeNext()
if err != nil {
log.Fatalf("Error on end event consumption: %v", err)
}
}
}
}


@@ -6,11 +6,13 @@ import (
 	"fmt"
 	"io"
 	"log"
+	"openreplay/backend/pkg/objectstorage"
 	"openreplay/backend/pkg/pool"
 	"os"
-	"sort"
+	"os/exec"
 	"strconv"
 	"strings"
 	"time"

 	config "openreplay/backend/internal/config/canvas-handler"
 )
@@ -21,13 +23,20 @@ type saveTask struct {
 	image *bytes.Buffer
 }

-type ImageStorage struct {
-	cfg       *config.Config
-	basePath  string
-	saverPool pool.WorkerPool
+type uploadTask struct {
+	path string
+	name string
 }

-func New(cfg *config.Config) (*ImageStorage, error) {
+type ImageStorage struct {
+	cfg          *config.Config
+	basePath     string
+	saverPool    pool.WorkerPool
+	uploaderPool pool.WorkerPool
+	objStorage   objectstorage.ObjectStorage
+}
+
+func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
 	switch {
 	case cfg == nil:
 		return nil, fmt.Errorf("config is empty")
@@ -37,10 +46,12 @@ func New(cfg *config.Config) (*ImageStorage, error) {
 		path += cfg.CanvasDir + "/"
 	}
 	s := &ImageStorage{
-		cfg:      cfg,
-		basePath: path,
+		cfg:        cfg,
+		basePath:   path,
+		objStorage: objStorage,
 	}
 	s.saverPool = pool.NewPool(4, 8, s.writeToDisk)
+	s.uploaderPool = pool.NewPool(4, 8, s.sendToS3)
 	return s, nil
 }
@@ -48,23 +59,33 @@ func (v *ImageStorage) Wait() {
 	v.saverPool.Pause()
 }

-func (v *ImageStorage) PrepareCanvasList(sessID uint64) ([]string, error) {
+func (v *ImageStorage) sendToS3(payload interface{}) {
+	task := payload.(*uploadTask)
+	start := time.Now()
+	video, err := os.ReadFile(task.path)
+	if err != nil {
+		log.Fatalf("Failed to read video file: %v", err)
+	}
+	if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.Zstd); err != nil {
+		log.Fatalf("Storage: start uploading replay failed. %s", err)
+	}
+	log.Printf("Replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
+	return
+}
+
+func (v *ImageStorage) PackSessionCanvases(sessID uint64) error {
 	path := fmt.Sprintf("%s/%d/", v.basePath, sessID)
 	// Check that the directory exists
 	files, err := os.ReadDir(path)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	if len(files) == 0 {
-		return []string{}, nil
+		return nil
 	}

-	type canvasData struct {
-		files map[int]string
-		times []int
-	}
-	images := make(map[string]*canvasData)
+	names := make(map[string]bool)

 	// Build the list of canvas images sets
 	for _, file := range files {
@@ -75,48 +96,31 @@ func (v *ImageStorage) PrepareCanvasList(sessID uint64) ([]string, error) {
 			continue
 		}
 		canvasID := fmt.Sprintf("%s_%s", parts[0], parts[1])
-		canvasTS, _ := strconv.Atoi(parts[2])
-		if _, ok := images[canvasID]; !ok {
-			images[canvasID] = &canvasData{
-				files: make(map[int]string),
-				times: make([]int, 0),
-			}
-		}
-		images[canvasID].files[canvasTS] = file.Name()
-		images[canvasID].times = append(images[canvasID].times, canvasTS)
+		names[canvasID] = true
 	}

-	// Prepare screenshot lists for ffmpeg
-	namesList := make([]string, 0)
-	for name, cData := range images {
-		// Write to file
-		mixName := fmt.Sprintf("%s-list", name)
-		mixList := path + mixName
-		outputFile, err := os.Create(mixList)
+	sessionID := strconv.FormatUint(sessID, 10)
+	//
+	for name := range names {
+		// Save to archives
+		archPath := fmt.Sprintf("%s%s.tar.zst", path, name)
+		fullCmd := fmt.Sprintf("find %s -type f -name '%s*' | tar -cf - --files-from=- | zstd -o %s",
+			path, name, archPath)
+		log.Printf("Executing command: %s", fullCmd)
+		cmd := exec.Command("sh", "-c", fullCmd)
+		var stdout, stderr bytes.Buffer
+		cmd.Stdout = &stdout
+		cmd.Stderr = &stderr
+		err := cmd.Run()
 		if err != nil {
-			log.Printf("can't create mix list, err: %s", err)
-			continue
+			log.Printf("Failed to execute command: %v, stderr: %v", err, stderr.String())
+			return err
 		}
-		sort.Ints(cData.times)
-		count := 0
-		for i := 0; i < len(cData.times)-1; i++ {
-			dur := float64(cData.times[i+1]-cData.times[i]) / 1000.0
-			line := fmt.Sprintf("file %s\nduration %.3f\n", cData.files[cData.times[i]], dur)
-			_, err := outputFile.WriteString(line)
-			if err != nil {
-				outputFile.Close()
-				log.Printf("%s", err)
-				continue
-			}
-			count++
-		}
-		outputFile.Close()
-		log.Printf("new canvas mix %s with %d images", mixList, count)
-		namesList = append(namesList, mixName)
+		v.uploaderPool.Submit(&uploadTask{path: archPath, name: sessionID + "/" + name + ".tar.zst"})
 	}
-	log.Printf("prepared %d canvas mixes for session %d", len(namesList), sessID)
-	return namesList, nil
+	return nil
 }

 func (v *ImageStorage) SaveCanvasToDisk(sessID uint64, data []byte) error {
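
PackSessionCanvases shells out to a find | tar | zstd pipeline, so each uploaded object is a zstd-compressed tar holding every saved frame of one canvas. The inverse operation a consumer might run could look like this sketch (assumes the same sh/tar/zstd binaries are available; the archive name is illustrative):

// Sketch: unpack a downloaded canvas archive back into individual frames.
package main

import (
	"bytes"
	"log"
	"os"
	"os/exec"
)

func main() {
	if err := os.MkdirAll("frames", 0o755); err != nil {
		log.Fatalf("can't create output dir: %v", err)
	}
	// zstd streams the decompressed tar to stdout; tar extracts it under ./frames.
	cmd := exec.Command("sh", "-c", "zstd -d -c 42_1.tar.zst | tar -xf - -C frames")
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("unpack failed: %v, stderr: %s", err, stderr.String())
	}
}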


@@ -3,10 +3,12 @@ package canvas_handler

 import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
+	"openreplay/backend/internal/config/objectstorage"
 )

 type Config struct {
 	common.Config
+	objectstorage.ObjectsConfig
 	FSDir             string `env:"FS_DIR,required"`
 	CanvasDir         string `env:"CANVAS_DIR,default=canvas"`
 	TopicCanvasImages string `env:"TOPIC_CANVAS_IMAGES,required"`
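
Embedding objectstorage.ObjectsConfig here is what lets main() pass &cfg.ObjectsConfig straight to store.NewStore. A sketch of the embedding mechanics, with made-up field and tag names since the real ObjectsConfig definition is not shown in this diff:

// Sketch: struct embedding promotes the embedded fields onto the outer config.
// Field and env-tag names are illustrative, not the real ObjectsConfig.
package main

import "fmt"

type ObjectsConfig struct {
	BucketName string `env:"BUCKET_NAME"`
}

type Config struct {
	ObjectsConfig        // embedded, as in the diff above
	FSDir         string `env:"FS_DIR,required"`
}

func newStore(o *ObjectsConfig) string { return "store for " + o.BucketName }

func main() {
	cfg := Config{ObjectsConfig: ObjectsConfig{BucketName: "canvases"}, FSDir: "/mnt/efs"}
	fmt.Println(cfg.BucketName)                // promoted field access
	fmt.Println(newStore(&cfg.ObjectsConfig))  // mirrors store.NewStore(&cfg.ObjectsConfig)
}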


@@ -1,90 +0,0 @@
package video_maker
import (
"bytes"
"fmt"
"log"
"openreplay/backend/pkg/pool"
"os"
"os/exec"
"strconv"
"strings"
"time"
config "openreplay/backend/internal/config/videostorage"
"openreplay/backend/pkg/objectstorage"
)
type uploadTask struct {
path string
name string
}
type VideoStorage struct {
cfg *config.Config
objStorage objectstorage.ObjectStorage
uploaderPool pool.WorkerPool
}
func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*VideoStorage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case objStorage == nil:
return nil, fmt.Errorf("object storage is empty")
}
s := &VideoStorage{
cfg: cfg,
objStorage: objStorage,
}
s.uploaderPool = pool.NewPool(4, 4, s.sendToS3)
return s, nil
}
func (v *VideoStorage) Process(sessID uint64, filesPath, canvasMix string) error {
name := strings.TrimSuffix(canvasMix, "-list")
mixList := fmt.Sprintf("%s%s", filesPath, canvasMix)
// check that mixList exists
if _, err := os.ReadFile(mixList); err != nil {
return err
}
videoPath := fmt.Sprintf("%s%s.mp4", filesPath, name)
// Run ffmpeg to build video
start := time.Now()
sessionID := strconv.FormatUint(sessID, 10)
cmd := exec.Command("ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", mixList, "-vf", "pad=ceil(iw/2)*2:ceil(ih/2)*2", "-vsync", "vfr",
"-pix_fmt", "yuv420p", "-preset", "ultrafast", videoPath)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
log.Printf("Failed to execute command: %v, stderr: %v", err, stderr.String())
return err
}
log.Printf("made video replay in %v", time.Since(start))
v.uploaderPool.Submit(&uploadTask{path: videoPath, name: sessionID + "/" + name + ".mp4"})
return nil
}
func (v *VideoStorage) sendToS3(payload interface{}) {
task := payload.(*uploadTask)
start := time.Now()
video, err := os.ReadFile(task.path)
if err != nil {
log.Fatalf("Failed to read video file: %v", err)
}
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "video/mp4", objectstorage.NoCompression); err != nil {
log.Fatalf("Storage: start uploading replay failed. %s", err)
}
log.Printf("Viode file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
return
}
func (v *VideoStorage) Wait() {
v.uploaderPool.Pause()
}


@@ -6,7 +6,7 @@ ASSIST_KEY=
 ASSIST_RECORDS_BUCKET=records
 ASSIST_URL=http://assist-openreplay.app.svc.cluster.local:9001/assist/%s
 assistList=/sockets-list
-CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.mp4
+CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.tar.zst
 captcha_key=
 captcha_server=
 CH_COMPRESSION=true


@@ -5,7 +5,7 @@ ASSIST_JWT_SECRET=secret
 ASSIST_KEY=abc
 ASSIST_URL=http://127.0.0.1:9001/assist/%s
 assistList=/sockets-list
-CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.mp4
+CANVAS_PATTERN=%(sessionId)s/%(recordingId)s.tar.zst
 captcha_key=
 captcha_server=
 change_password_link=/changepassword?invitation=%s&&pass=%s


@@ -1,9 +1,9 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: "{{ include "imagestorage.fullname" . }}-test-connection"
+  name: "{{ include "canvas-handler.fullname" . }}-test-connection"
   labels:
-    {{- include "imagestorage.labels" . | nindent 4 }}
+    {{- include "canvas-handler.labels" . | nindent 4 }}
   annotations:
     "helm.sh/hook": test
 spec:
@@ -11,5 +11,5 @@ spec:
   - name: wget
     image: busybox
     command: ['wget']
-    args: ['{{ include "imagestorage.fullname" . }}:{{ .Values.service.port }}']
+    args: ['{{ include "canvas-handler.fullname" . }}:{{ .Values.service.port }}']
   restartPolicy: Never


@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@@ -1,24 +0,0 @@
apiVersion: v2
name: canvas-maker
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
AppVersion: "v1.17.0"


@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "canvas-maker.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "canvas-maker.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "canvas-maker.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "canvas-maker.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}


@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "canvas-maker.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "canvas-maker.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "canvas-maker.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "canvas-maker.labels" -}}
helm.sh/chart: {{ include "canvas-maker.chart" . }}
{{ include "canvas-maker.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "canvas-maker.selectorLabels" -}}
app.kubernetes.io/name: {{ include "canvas-maker.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "canvas-maker.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "canvas-maker.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@@ -1,131 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "canvas-maker.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "canvas-maker.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "canvas-maker.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "canvas-maker.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "canvas-maker.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
shareProcessNamespace: true
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- if .Values.global.enterpriseEditionLicense }}
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee"
{{- else }}
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.healthCheck}}
{{- .Values.healthCheck | toYaml | nindent 10}}
{{- end}}
env:
- name: AWS_ACCESS_KEY_ID
{{- if .Values.global.s3.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ .Values.global.s3.existingSecret }}
key: access-key
{{- else }}
value: {{ .Values.global.s3.accessKey }}
{{- end }}
- name: AWS_SECRET_ACCESS_KEY
{{- if .Values.global.s3.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ .Values.global.s3.existingSecret }}
key: secret-key
{{- else }}
value: {{ .Values.global.s3.secretKey }}
{{- end }}
- name: AWS_ENDPOINT
value: '{{ .Values.global.s3.endpoint }}'
- name: AWS_REGION
value: '{{ .Values.global.s3.region }}'
- name: BUCKET_NAME
value: {{ .Values.global.s3.recordingsBucket }}
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
value: '{{ .Values.global.kafka.kafkaUseSsl }}'
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.global.env }}
- name: {{ $key }}
value: '{{ $val }}'
{{- end }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
{{- end}}
ports:
{{- range $key, $val := .Values.service.ports }}
- name: {{ $key }}
containerPort: {{ $val }}
protocol: TCP
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
volumeMounts:
- name: datadir
mountPath: /mnt/efs
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 10 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 10 }}
{{- end }}
{{- if eq (tpl .Values.pvc.name . ) "hostPath" }}
volumes:
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 6 }}
{{- end }}
- name: datadir
hostPath:
# Ensure the file directory is created.
path: {{ .Values.pvc.hostMountPath }}
type: DirectoryOrCreate
{{- else }}
volumes:
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
- name: datadir
persistentVolumeClaim:
claimName: "{{ tpl .Values.pvc.name . }}"
{{- end }}
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@@ -1,29 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "canvas-maker.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "canvas-maker.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "canvas-maker.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}


@@ -1,62 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "canvas-maker.fullname" . -}}
{{- $svcPort := .Values.service.ports.http -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "canvas-maker.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,18 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "canvas-maker.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "canvas-maker.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
{{- range $key, $val := .Values.service.ports }}
- port: {{ $val }}
targetPort: {{ $key }}
protocol: TCP
name: {{ $key }}
{{- end}}
selector:
{{- include "canvas-maker.selectorLabels" . | nindent 4 }}


@@ -1,18 +0,0 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "canvas-maker.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "canvas-maker.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
endpoints:
{{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }}
selector:
matchLabels:
{{- include "canvas-maker.selectorLabels" . | nindent 6 }}
{{- end }}


@@ -1,13 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "canvas-maker.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "canvas-maker.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "canvas-maker.fullname" . }}-test-connection"
labels:
{{- include "canvas-maker.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "canvas-maker.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never


@@ -1,124 +0,0 @@
# Default values for openreplay.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: "{{ .Values.global.openReplayContainerRegistry }}/canvas-maker"
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: "canvas-maker"
fullnameOverride: "canvas-maker-openreplay"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
securityContext:
runAsUser: 1001
runAsGroup: 1001
podSecurityContext:
runAsUser: 1001
runAsGroup: 1001
fsGroup: 1001
fsGroupChangePolicy: "OnRootMismatch"
#securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
ports:
http: 9000
metrics: 8888
serviceMonitor:
enabled: true
additionalLabels:
release: observability
scrapeConfigs:
- port: metrics
honorLabels: true
interval: 15s
path: /metrics
scheme: http
scrapeTimeout: 10s
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
env:
FS_CLEAN_HRS: 24
pvc:
# This can be either persistentVolumeClaim or hostPath.
# In case of pvc, you'll have to provide the pvc name.
# For example
# name: openreplay-efs
name: "{{ .Values.global.pvcRWXName }}"
hostMountPath: /openreplay/storage/nfs
persistence: {}
# # Spec of spec.template.spec.containers[*].volumeMounts
# mounts:
# - name: kafka-ssl
# mountPath: /opt/kafka/ssl
# # Spec of spec.template.spec.volumes
# volumes:
# - name: kafka-ssl
# secret:
# secretName: kafka-ssl
nodeSelector: {}
tolerations: []
affinity: {}