chore(helm): Adding shared pvc name as global value
Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
parent ca5381850a
commit 2b8e008a18
9 changed files with 16 additions and 29 deletions
@@ -148,7 +148,7 @@ spec:
 {{- end }}
 resources:
   {{- toYaml .Values.resources | nindent 12 }}
-{{- if eq .Values.pvc.name "hostPath" }}
+{{- if eq (tpl .Values.pvc.name . ) "hostPath" }}
 volumes:
 - name: datadir
   hostPath:
@@ -162,7 +162,7 @@ spec:
 volumes:
 - name: datadir
   persistentVolumeClaim:
-    claimName: {{ .Values.pvc.name }}
+    claimName: "{{ tpl .Values.pvc.name . }}"
 {{- with .Values.persistence.volumes }}
   {{- toYaml . | nindent 8 }}
 {{- end }}
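The template hunks above and below all make the same change: the value lookup is wrapped in Helm's tpl function, because pvc.name now defaults to a templated string instead of a literal. A minimal sketch of why that matters, using only values that appear in this diff:

# Sub-chart values (see the values.yaml hunks in this commit):
pvc:
  name: "{{ .Values.global.pvcRWXName }}"
# Global values (added further down in this commit):
global:
  pvcRWXName: "hostPath"

# Without tpl, the comparison sees the raw, unrendered string:
#   eq .Values.pvc.name "hostPath"            => false
# With tpl, the string is rendered against the root context first:
#   eq (tpl .Values.pvc.name . ) "hostPath"   => true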
@@ -133,7 +133,7 @@ pvc:
   # In case of pvc, you'll have to provide the pvc name.
   # For example
   # name: openreplay-efs
-  name: hostPath
+  name: "{{ .Values.global.pvcRWXName }}"
   hostMountPath: /openreplay/storage/nfs

 persistence: {}
@@ -89,7 +89,7 @@ spec:
 {{- with .Values.persistence.mounts }}
   {{- toYaml . | nindent 10 }}
 {{- end }}
-{{- if eq .Values.pvc.name "hostPath" }}
+{{- if eq (tpl .Values.pvc.name . ) "hostPath" }}
 volumes:
 - name: datadir
   hostPath:
@@ -104,7 +104,7 @@ spec:
 volumes:
 - name: datadir
   persistentVolumeClaim:
-    claimName: {{ .Values.pvc.name }}
+    claimName: "{{ tpl .Values.pvc.name . }}"
 {{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
 {{- with .Values.persistence.volumes }}
   {{- toYaml . | nindent 8 }}
@@ -102,7 +102,7 @@ pvc:
   # In case of pvc, you'll have to provide the pvc name.
   # For example
   # name: openreplay-efs
-  name: hostPath
+  name: "{{ .Values.global.pvcRWXName }}"
   hostMountPath: /openreplay/storage/nfs

 persistence: {}
@@ -97,7 +97,7 @@ spec:
 {{- with .Values.persistence.mounts }}
   {{- toYaml . | nindent 10 }}
 {{- end }}
-{{- if eq .Values.pvc.name "hostPath" }}
+{{- if eq (tpl .Values.pvc.name . ) "hostPath" }}
 volumes:
 {{- with .Values.persistence.volumes }}
   {{- toYaml . | nindent 6 }}
@@ -114,7 +114,7 @@ spec:
 {{- end }}
 - name: datadir
   persistentVolumeClaim:
-    claimName: {{ .Values.pvc.name }}
+    claimName: "{{ tpl .Values.pvc.name . }}"
 {{- end }}
 {{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
 {{- with .Values.nodeSelector }}
@@ -103,7 +103,7 @@ pvc:
   # In case of pvc, you'll have to provide the pvc name.
   # For example
   # name: openreplay-efs
-  name: hostPath
+  name: "{{ .Values.global.pvcRWXName }}"
   hostMountPath: /openreplay/storage/nfs

 persistence: {}
@@ -40,7 +40,7 @@ spec:
 - mountPath: /mnt/efs
   name: datadir
 restartPolicy: Never
-{{- if eq .Values.efsCleaner.pvc.name "hostPath" }}
+{{- if eq (tpl .Values.efsCleaner.pvc.name . ) "hostPath" }}
 volumes:
 - name: datadir
   hostPath:
@@ -51,6 +51,6 @@ spec:
 volumes:
 - name: datadir
   persistentVolumeClaim:
-    claimName: {{ .Values.efsCleaner.pvc.name }}
+    claimName: {{ tpl .Values.efsCleaner.pvc.name . }}
 {{- end }}

@@ -15,7 +15,7 @@ efsCleaner:
   # In case of pvc, you'll have to provide the pvc name.
   # For example
   # name: openreplay-efs
-  name: hostPath
+  name: "{{ .Values.global.pvcRWXName }}"
   hostMountPath: /openreplay/storage/nfs

 telemetry:
@@ -108,6 +108,10 @@ global:
   # secret key to inject to assist and peers service
   assistKey: "SetARandomStringHere"
   assistJWTSecret: "SetARandomStringHere"
+  # In case of multiple nodes in the kubernetes cluster,
+  # we'll have to create an RWX PVC for shared components.
+  # If it's a single node, we'll use hostVolume, which is the default for the community/oss edition.
+  pvcRWXName: "hostPath"
   s3:
     region: "us-east-1"
     endpoint: "http://minio.db.svc.cluster.local:9000"
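Because every sub-chart's pvc.name now defaults to "{{ .Values.global.pvcRWXName }}", a multi-node cluster only has to override this single value with the name of a pre-created ReadWriteMany PVC; single-node installs keep the hostPath default. An illustrative override file, reusing the openreplay-efs example name from the values comments in this diff (not a value shipped by the chart):

# override.yaml (illustrative only)
global:
  # Name of an existing RWX PVC shared by the components that mount datadir
  pvcRWXName: "openreplay-efs"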
@@ -117,9 +121,6 @@ global:
    vaultBucket: "vault-data"
    # This is only for enterpriseEdition
    quickwitBucket: "quickwit"
    # if you're using one node installation, where
    # you're using local s3, make sure these variables
    # are same as minio.global.minio.accesskey and secretKey
    accessKey: "changeMeMinioAccessKey"
    secretKey: "changeMeMinioPassword"
  email:
@@ -136,20 +137,6 @@ global:
   enterpriseEditionLicense: ""
   domainName: ""

-# If there is multiple nodes in the kubernetes cluster,
-# we'll have to create a NFS share PVC for both the containers to share data.
-# If it's the single node, we'll use hostVolume, which is default for community installation.
-# Note: Both PVC name should be same.
-# sink:
-#   pvc:
-#     name: mysharedpersistence
-# storage:
-#   pvc:
-#     name: mysharedpersistence
-# chalice:
-#   pvc:
-#     name: mysharedpersistence
-
 chalice:
   env:
     jwt_secret: "SetARandomStringHere"
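The comment block removed above documented the old approach of pointing each service (sink, storage, chalice) at a shared PVC individually. After this commit the single global value replaces it; a rough equivalent, sketched with the same placeholder name used in the removed comments:

# Old, repeated per service (removed above):
# sink:
#   pvc:
#     name: mysharedpersistence
# New, set once:
global:
  pvcRWXName: "mysharedpersistence"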