fromVersion: "v1.6.0" # Databases specific variables postgresql: &postgres # For generating passwords # `openssl rand -hex 20` postgresqlPassword: "changeMePassword" postgresqlHost: "postgresql.db.svc.cluster.local" postgresqlPort: "5432" postgresqlUser: "postgres" postgresqlDatabase: "postgres" # resources: # requests: # memory: 256Mi # cpu: 250m # limits: # memory: 3000Mi # asdf # cpu: 2 clickhouse: # For enterpriseEdition enabled: false kafka: &kafka # For enterpriseEdition # enabled: true kafkaHost: "kafka.db.svc.cluster.local" # For now, clickhouse doesn't support zookeeper tls intgration. # So we need http endpoint zookeeperHost: "databases-zookeeper.svc.cluster.local" zookeeperNonTLSPort: 2181 kafkaPort: "9092" kafkaUseSsl: "false" maxMessageBytes: _3000000 # deleteTopicEnable: true extraEnvVars: - name: KAFKA_CFG_REPLICA_FETCH_MAX_BYTES value: "3000000" # This value already exist in the kafka values.yaml file, so overriding in top # - name: KAFKA_CFG_MESSAGE_MAX_BYTES # value: "3000000" redis: &redis # For enterpriseEdition # enabled: false redisHost: "redis-master.db.svc.cluster.local" redisPort: "6379" minio: &minio # If you have extrenal s3 storage, like AWS, or GCP # Disable it. enabled: true global: minio: # For generating passwords # `openssl rand -hex 20` accessKey: "changeMeMinioAccessKey" secretKey: "changeMeMinioPassword" ingress-nginx: &ingress-nginx controller: ingressClassResource: # -- Name of the ingressClass name: openreplay # Onlf if Metrics enabled # metrics: # enabled: true # serviceMonitor: # enabled: true # additionalLabels: # release: monitoring # namespaceSelector: # any: true # -- For backwards compatibility with ingress.class annotation, use ingressClass. # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation ingressClass: openreplay service: externalTrafficPolicy: "Local" extraArgs: default-ssl-certificate: "app/openreplay-ssl" config: use-gzip: true load-balance: ewma enable-real-ip: true # Enable LB forwarded protocol # Ref: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#use-forwarded-headers # https://github.com/nginxinc/kubernetes-ingress/issues/1284#issuecomment-872869354 # use-forwarded-headers: true # Ref: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#max-worker-connections max-worker-connections: 0 # SSL redirection ssl-redirect: false force-ssl-redirect: false proxy-body-size: 10m # Application specific variables global: ingress: *ingress-nginx postgresql: *postgres kafka: *kafka redis: *redis minio: *minio s3: region: "us-east-1" endpoint: "http://minio.db.svc.cluster.local:9000" assetsBucket: "sessions-assets" recordingsBucket: "mobs" sourcemapsBucket: "sourcemaps" # if you're using one node installation, where # you're using local s3, make sure these variables # are same as minio.global.minio.accesskey and secretKey accessKey: "changeMeMinioAccessKey" secretKey: "changeMeMinioPassword" email: emailHost: '' emailPort: '587' emailUser: '' emailPassword: '' emailUseTls: 'true' emailUseSsl: 'false' emailSslKey: '' emailSslCert: '' emailFrom: 'OpenReplay' enterpriseEditionLicense: "" domainName: "" # If there is multiple nodes in the kubernetes cluster, # we'll have to create a NFS share PVC for both the containers to share data. # If it's the single node, we'll use hostVolume, which is default for community installation. # Note: Both PVC name should be same. 
# sink:
#   pvc:
#     name: mysharedpersistence
# storage:
#   pvc:
#     name: mysharedpersistence

chalice:
  env:
    jwt_secret: "SetARandomStringHere"
    # captcha_server: ''
    # captcha_key: ''
    # SAML2_MD_URL: ''
    # idp_entityId: ''
    # idp_sso_url: ''
    # idp_x509cert: ''
    # idp_sls_url: ''
    # idp_name: ''
    # idp_tenantKey: ''

# If you want to override something:
# chartname:
#   fieldFrom chart/Values.yaml:
#     key: value
#
# For example (http):
# http:
#   resources:
#     limits:
#       cpu: 1024m
#       memory: 4096Mi
#     requests:
#       cpu: 512m
#       memory: 2056Mi
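# A minimal sketch (not part of the upstream chart values): one way to fill in the
# "changeMe*" placeholders and jwt_secret above is to generate random values with the
# `openssl rand -hex 20` command already referenced in the comments, for example:
#
#   openssl rand -hex 20   # -> postgresql.postgresqlPassword
#   openssl rand -hex 20   # -> minio accessKey (and global.s3.accessKey)
#   openssl rand -hex 20   # -> minio secretKey (and global.s3.secretKey)
#   openssl rand -hex 20   # -> chalice.env.jwt_secret
#
# When the bundled MinIO is enabled, keep minio.global.minio.* and global.s3.* in sync,
# as noted in the s3 section above.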