From 0759ec2888da934a196de3078c8ed3640d80bbee Mon Sep 17 00:00:00 2001
From: rjshrjndrn
Date: Mon, 13 Feb 2023 13:50:26 +0100
Subject: [PATCH] chore(helm): Updating kafka chart

---
 .../databases/charts/kafka/.helmignore | 0
 .../databases/charts/kafka/Chart.lock | 9 +
 .../databases/charts/kafka/Chart.yaml | 32 +-
 .../databases/charts/kafka/README.md | 1043 +++++----
 .../charts/kafka/charts/common/.helmignore | 22 +
 .../charts/kafka/charts/common/Chart.yaml | 23 +
 .../charts/kafka/charts/common/README.md | 351 +++
 .../charts/common/templates/_affinities.tpl | 106 +
 .../charts/common/templates/_capabilities.tpl | 154 ++
 .../kafka/charts/common/templates/_errors.tpl | 23 +
 .../kafka/charts/common/templates/_images.tpl | 76 +
 .../charts/common/templates/_ingress.tpl | 68 +
 .../kafka/charts/common/templates/_labels.tpl | 18 +
 .../kafka/charts/common/templates/_names.tpl | 66 +
 .../charts/common/templates/_secrets.tpl | 165 ++
 .../charts/common/templates/_storage.tpl | 23 +
 .../charts/common/templates/_tplvalues.tpl | 13 +
 .../kafka/charts/common/templates/_utils.tpl | 62 +
 .../charts/common/templates/_warnings.tpl | 14 +
 .../templates/validations/_cassandra.tpl | 72 +
 .../common/templates/validations/_mariadb.tpl | 103 +
 .../common/templates/validations/_mongodb.tpl | 108 +
 .../common/templates/validations/_mysql.tpl | 103 +
 .../templates/validations/_postgresql.tpl | 129 ++
 .../common/templates/validations/_redis.tpl | 76 +
 .../templates/validations/_validations.tpl | 46 +
 .../charts/kafka/charts/common/values.yaml | 5 +
 .../charts/kafka/charts/zookeeper/.helmignore | 0
 .../charts/kafka/charts/zookeeper/Chart.lock | 6 +
 .../charts/kafka/charts/zookeeper/Chart.yaml | 30 +-
 .../charts/kafka/charts/zookeeper/README.md | 548 +++--
 .../zookeeper/charts/common/.helmignore | 22 +
 .../charts/zookeeper/charts/common/Chart.yaml | 23 +
 .../charts/zookeeper/charts/common/README.md | 351 +++
 .../charts/common/templates/_affinities.tpl | 106 +
 .../charts/common/templates/_capabilities.tpl | 154 ++
 .../charts/common/templates/_errors.tpl | 23 +
 .../charts/common/templates/_images.tpl | 76 +
 .../charts/common/templates/_ingress.tpl | 68 +
 .../charts/common/templates/_labels.tpl | 18 +
 .../charts/common/templates/_names.tpl | 66 +
 .../charts/common/templates/_secrets.tpl | 165 ++
 .../charts/common/templates/_storage.tpl | 23 +
 .../charts/common/templates/_tplvalues.tpl | 13 +
 .../charts/common/templates/_utils.tpl | 62 +
 .../charts/common/templates/_warnings.tpl | 14 +
 .../templates/validations/_cassandra.tpl | 72 +
 .../common/templates/validations/_mariadb.tpl | 103 +
 .../common/templates/validations/_mongodb.tpl | 108 +
 .../common/templates/validations/_mysql.tpl | 103 +
 .../templates/validations/_postgresql.tpl | 129 ++
 .../common/templates/validations/_redis.tpl | 76 +
 .../templates/validations/_validations.tpl | 46 +
 .../zookeeper/charts/common/values.yaml | 5 +
 .../charts/zookeeper/templates/NOTES.txt | 63 +-
 .../charts/zookeeper/templates/_helpers.tpl | 469 ++--
 .../charts/zookeeper/templates/configmap.yaml | 16 +-
 .../zookeeper/templates/extra-list.yaml | 4 +
 .../zookeeper/templates/metrics-svc.yaml | 16 +-
 .../zookeeper/templates/networkpolicy.yaml | 46 +-
 .../kafka/charts/zookeeper/templates/pdb.yaml | 26 +
 .../templates/poddisruptionbudget.yaml | 21 -
 .../zookeeper/templates/prometheusrule.yaml | 27 +
 .../zookeeper/templates/prometheusrules.yaml | 27 -
 .../templates/scripts-configmap.yaml | 102 +
 .../charts/zookeeper/templates/secrets.yaml | 75 +-
 .../zookeeper/templates/serviceaccount.yaml | 18 +-
 .../zookeeper/templates/servicemonitor.yaml | 35 +-
 .../zookeeper/templates/statefulset.yaml | 468 ++--
 .../zookeeper/templates/svc-headless.yaml | 36 +-
 .../kafka/charts/zookeeper/templates/svc.yaml | 63 +-
 .../zookeeper/templates/tls-secrets.yaml | 55 +
 .../charts/zookeeper/values-production.yaml | 430 ----
 .../charts/kafka/charts/zookeeper/values.yaml | 1089 +++++++--
 .../charts/kafka/files/jks/README.md | 10 -
 .../databases/charts/kafka/kafka.yaml | 521 -----
 .../databases/charts/kafka/requirements.lock | 6 -
 .../databases/charts/kafka/requirements.yaml | 5 -
 .../charts/kafka/templates/NOTES.txt | 237 ++-
 .../charts/kafka/templates/_helpers.tpl | 467 ++--
 .../charts/kafka/templates/configmap.yaml | 9 +-
 .../charts/kafka/templates/extra-list.yaml | 7 +-
 .../charts/kafka/templates/jaas-secret.yaml | 28 +-
 .../charts/kafka/templates/jks-secret.yaml | 19 -
 .../charts/kafka/templates/jmx-configmap.yaml | 15 +-
 .../kafka/templates/jmx-metrics-svc.yaml | 35 +-
 .../templates/kafka-metrics-deployment.yaml | 142 +-
 .../kafka-metrics-serviceaccount.yaml | 16 +
 .../kafka/templates/kafka-metrics-svc.yaml | 37 +-
 .../templates/kafka-provisioning-secret.yaml | 19 +
 .../kafka-provisioning-serviceaccount.yaml | 15 +
 .../kafka/templates/kafka-provisioning.yaml | 260 +++
 .../kafka/templates/log4j-configmap.yaml | 9 +-
 .../kafka/templates/networkpolicy-egress.yaml | 22 +
 .../templates/networkpolicy-ingress.yaml | 53 +
 .../kafka/templates/poddisruptionbudget.yaml | 13 +-
 .../kafka/templates/prometheusrule.yaml | 20 +
 .../charts/kafka/templates/role.yaml | 11 +-
 .../charts/kafka/templates/rolebinding.yaml | 15 +-
 .../kafka/templates/scripts-configmap.yaml | 126 +-
 .../kafka/templates/serviceaccount.yaml | 16 +-
 .../templates/servicemonitor-jmx-metrics.yaml | 35 +-
 .../templates/servicemonitor-metrics.yaml | 35 +-
 .../charts/kafka/templates/statefulset.yaml | 415 ++--
 .../kafka/templates/svc-external-access.yaml | 33 +-
 .../charts/kafka/templates/svc-headless.yaml | 29 +-
 .../databases/charts/kafka/templates/svc.yaml | 40 +-
 .../charts/kafka/templates/tls-secrets.yaml | 31 +
 .../charts/kafka/values-production.yaml | 931 --------
 .../databases/charts/kafka/values.yaml | 1878 ++++++++++++----
 110 files changed, 9560 insertions(+), 4406 deletions(-)
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/.helmignore
 create mode 100644 scripts/helmcharts/databases/charts/kafka/Chart.lock
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/Chart.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/README.md
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/.helmignore
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/Chart.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/README.md
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_affinities.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_capabilities.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_errors.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_images.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_ingress.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_labels.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_names.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_secrets.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_storage.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_tplvalues.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_utils.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/_warnings.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_cassandra.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mariadb.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mongodb.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mysql.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_postgresql.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_redis.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_validations.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/common/values.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.lock
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/.helmignore
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/Chart.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/README.md
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/values.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/extra-list.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/pdb.yaml
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/files/jks/README.md
 delete mode 100644 scripts/helmcharts/databases/charts/kafka/kafka.yaml
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/requirements.lock
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/requirements.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/_helpers.tpl
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/configmap.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/extra-list.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/jaas-secret.yaml
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/templates/jks-secret.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/jmx-configmap.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-serviceaccount.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-secret.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-serviceaccount.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-egress.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-ingress.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/prometheusrule.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/role.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/templates/svc.yaml
 create mode 100644 scripts/helmcharts/databases/charts/kafka/templates/tls-secrets.yaml
 delete mode 100755 scripts/helmcharts/databases/charts/kafka/values-production.yaml
 mode change 100755 => 100644 scripts/helmcharts/databases/charts/kafka/values.yaml

diff --git a/scripts/helmcharts/databases/charts/kafka/.helmignore b/scripts/helmcharts/databases/charts/kafka/.helmignore
old mode 100755
new mode 100644
diff --git a/scripts/helmcharts/databases/charts/kafka/Chart.lock b/scripts/helmcharts/databases/charts/kafka/Chart.lock
new file mode 100644
index 000000000..39d54db13
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/kafka/Chart.lock
@@ -0,0 +1,9 @@
+dependencies:
+- name: zookeeper
+  repository: https://charts.bitnami.com/bitnami
+  version: 11.1.0
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 2.2.2
+digest: sha256:da099b68bc1deabb4998fd87b4141440f26dba1a14801a507c402247830e75ee
+generated: "2023-01-24T02:09:12.655952782Z"
diff --git a/scripts/helmcharts/databases/charts/kafka/Chart.yaml b/scripts/helmcharts/databases/charts/kafka/Chart.yaml
old mode 100755
new mode 100644
index 165e70d55..17192c343
--- a/scripts/helmcharts/databases/charts/kafka/Chart.yaml
+++ b/scripts/helmcharts/databases/charts/kafka/Chart.yaml
@@ -1,11 +1,23 @@
 annotations:
   category: Infrastructure
-apiVersion: v1
-appVersion: 2.6.0
-description: Apache Kafka is a distributed streaming platform.
-engine: gotpl
-home: https://github.com/bitnami/charts/tree/master/bitnami/kafka
-icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-110x117.png
+  licenses: Apache-2.0
+apiVersion: v2
+appVersion: 3.3.2
+dependencies:
+- condition: zookeeper.enabled
+  name: zookeeper
+  repository: https://charts.bitnami.com/bitnami
+  version: 11.x.x
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  tags:
+  - bitnami-common
+  version: 2.x.x
+description: Apache Kafka is a distributed streaming platform designed to build real-time
+  pipelines and can be used as a message broker or as a replacement for a log aggregation
+  solution for big data applications.
+home: https://github.com/bitnami/charts/tree/main/bitnami/kafka
+icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-220x234.png
 keywords:
 - kafka
 - zookeeper
@@ -13,10 +25,10 @@ keywords:
 - producer
 - consumer
 maintainers:
-- email: containers@bitnami.com
-  name: Bitnami
+- name: Bitnami
+  url: https://github.com/bitnami/charts
 name: kafka
 sources:
-- https://github.com/bitnami/bitnami-docker-kafka
+- https://github.com/bitnami/containers/tree/main/bitnami/kafka
 - https://kafka.apache.org/
-version: 11.8.6
+version: 20.0.6
diff --git a/scripts/helmcharts/databases/charts/kafka/README.md b/scripts/helmcharts/databases/charts/kafka/README.md
old mode 100755
new mode 100644
index 5584bd43d..9ee6f328b
--- a/scripts/helmcharts/databases/charts/kafka/README.md
+++ b/scripts/helmcharts/databases/charts/kafka/README.md
@@ -1,24 +1,30 @@
-# Kafka
+

-[Kafka](https://www.kafka.org/) is a distributed streaming platform used for building real-time data pipelines and streaming apps. It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies.
+# Apache Kafka packaged by Bitnami
+
+Apache Kafka is a distributed streaming platform designed to build real-time pipelines and can be used as a message broker or as a replacement for a log aggregation solution for big data applications.
+
+[Overview of Apache Kafka](http://kafka.apache.org/)
+
+Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement.

 ## TL;DR

 ```console
-helm repo add bitnami https://charts.bitnami.com/bitnami
-helm install my-release bitnami/kafka
+$ helm repo add my-repo https://charts.bitnami.com/bitnami
+$ helm install my-release my-repo/kafka
 ```

 ## Introduction

-This chart bootstraps a [Kafka](https://github.com/bitnami/bitnami-docker-kafka) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+This chart bootstraps a [Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
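An aside for reviewers of this patch: beyond the bare defaults in the TL;DR above, the updated chart is usually installed with a few overrides. A minimal sketch, not part of this diff (the release name and repo alias are illustrative; both parameters appear in the tables below):

```console
$ helm install my-release my-repo/kafka \
    --set replicaCount=3 \
    --set deleteTopicEnable=true
```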

-Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.

 ## Prerequisites

-- Kubernetes 1.12+
-- Helm 2.12+ or Helm 3.0-beta3+
+- Kubernetes 1.19+
+- Helm 3.2.0+
 - PV provisioner support in the underlying infrastructure

 ## Installing the Chart
@@ -26,8 +32,8 @@ Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment
 To install the chart with the release name `my-release`:

 ```console
-helm repo add bitnami https://charts.bitnami.com/bitnami
-helm install my-release bitnami/kafka
+$ helm repo add my-repo https://charts.bitnami.com/bitnami
+$ helm install my-release my-repo/kafka
 ```

 These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
@@ -39,240 +45,460 @@ These commands deploy Kafka on the Kubernetes cluster in the default configurati
 To uninstall/delete the `my-release` deployment:

 ```console
-helm delete my-release
+$ helm delete my-release
 ```

 The command removes all the Kubernetes components associated with the chart and deletes the release.

 ## Parameters

-The following tables lists the configurable parameters of the Kafka chart and their default values per section/component:
-
 ### Global parameters

-| Parameter | Description | Default |
-|-----------|-------------|---------|
-| `global.imageRegistry` | Global Docker image registry | `nil` |
-| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
-| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `global.imageRegistry` | Global Docker image registry | `""` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
+| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` |
+
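The global values above apply to the chart and its `zookeeper`/`common` subcharts alike. As a sketch, pinning every PVC to one StorageClass (the class name `standard` is a placeholder for whatever exists in your cluster):

```console
$ helm install my-release my-repo/kafka --set global.storageClass=standard
```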
 ### Common parameters

-| Parameter | Description | Default |
-|-----------|-------------|---------|
-| `nameOverride` | String to partially override kafka.fullname | `nil` |
-| `fullnameOverride` | String to fully override kafka.fullname | `nil` |
-| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
-| `commonLabels` | Labels to add to all deployed objects | `{}` |
-| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
-| `extraDeploy` | Array of extra objects to deploy with the release | `nil` (evaluated as a template) |
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `kubeVersion` | Override Kubernetes version | `""` |
+| `nameOverride` | String to partially override common.names.fullname | `""` |
+| `fullnameOverride` | String to fully override common.names.fullname | `""` |
+| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
+| `commonLabels` | Labels to add to all deployed objects | `{}` |
+| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
+| `extraDeploy` | Array of extra objects to deploy with the release | `[]` |
+| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` |
+| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` |
+| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` |
+
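When a broker crash-loops, the diagnostic mode described above keeps the pod alive for inspection by disabling all probes and overriding the container command with `sleep infinity`. A hedged example against an existing release:

```console
$ helm upgrade my-release my-repo/kafka --set diagnosticMode.enabled=true
```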
 ### Kafka parameters

-| Parameter | Description | Default |
-|-----------|-------------|---------|
-| `image.registry` | Kafka image registry | `docker.io` |
-| `image.repository` | Kafka image name | `bitnami/kafka` |
-| `image.tag` | Kafka image tag | `{TAG_NAME}` |
-| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` |
-| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
-| `image.debug` | Set to true if you would like to see extra information on logs | `false` |
-| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `nil` |
-| `existingConfigmap` | Name of existing ConfigMap with Kafka configuration | `nil` |
-| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers. | `nil` |
-| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file. | `nil` |
-| `heapOpts` | Kafka's Java Heap size | `-Xmx1024m -Xms1024m` |
-| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` |
-| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `false` |
-| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `10000` |
-| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` |
-| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` |
-| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` |
-| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` |
-| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` |
-| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` |
-| `maxMessageBytes` | The largest record batch size allowed by Kafka | `1000012` |
-| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` |
-| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` |
-| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` |
-| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` |
-| `numIoThreads` | The number of threads doing disk I/O | `8` |
-| `numNetworkThreads` | The number of threads handling network requests | `3` |
-| `numPartitions` | The default number of log partitions per topic | `1` |
-| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` |
-| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` |
-| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` |
-| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` |
-| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to Zookeeper | `6000` |
-| `extraEnvVars` | Extra environment variables to add to kafka pods | `[]` |
-| `extraVolumes` | Extra volume(s) to add to Kafka statefulset | `[]` |
-| `extraVolumeMounts` | Extra volumeMount(s) to add to Kafka containers | `[]` |
-| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` |
-| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` |
-| `auth.saslMechanisms` | SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` |
-| `auth.saslInterBrokerMechanism` | SASL mechanism to use as inter broker protocol, it must be included at `auth.saslMechanisms` | `plain` |
-| `auth.jksSecret` | Name of the existing secret containing the truststore and one keystore per Kafka broker you have in the cluster | `nil` |
-| `auth.jksPassword` | Password to access the JKS files when they are password-protected | `nil` |
-| `auth.tlsEndpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` |
-| `auth.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` |
-| `auth.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `nil` |
-| `auth.jaas.zookeeperUser` | Kafka Zookeeper user for SASL authentication | `nil` |
-| `auth.jaas.zookeeperPassword` | Kafka Zookeeper password for SASL authentication | `nil` |
-| `auth.jaas.existingSecret` | Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser | `nil` |
-| `auth.jaas.clientUsers` | List of Kafka client users to be created, separated by commas. This values will override `auth.jaas.clientUser` | `[]` |
-| `auth.jaas.clientPasswords` | List of passwords for `auth.jaas.clientUsers`. It is mandatory to provide the passwords when using `auth.jaas.clientUsers` | `[]` |
-| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` |
-| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array | `[]` |
-| `listenerSecurityProtocolMap` | The protocol->listener mapping. Auto-calculated it's set to nil | `nil` |
-| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` |
-| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` |
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `image.registry` | Kafka image registry | `docker.io` |
+| `image.repository` | Kafka image repository | `bitnami/kafka` |
+| `image.tag` | Kafka image tag (immutable tags are recommended) | `3.3.2-debian-11-r0` |
+| `image.digest` | Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` |
+| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `""` |
+| `existingConfigmap` | ConfigMap with Kafka Configuration | `""` |
+| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers | `""` |
+| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file | `""` |
+| `heapOpts` | Kafka Java Heap size | `-Xmx1024m -Xms1024m` |
+| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` |
+| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `true` |
+| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `_10000` |
+| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` |
+| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` |
+| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` |
+| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` |
+| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` |
+| `logsDirs` | A comma separated list of directories in which kafka's log data is kept | `/bitnami/kafka/data` |
+| `maxMessageBytes` | The largest record batch size allowed by Kafka | `_1000012` |
+| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` |
+| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` |
+| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` |
+| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` |
+| `numIoThreads` | The number of threads doing disk I/O | `8` |
+| `numNetworkThreads` | The number of threads handling network requests | `3` |
+| `numPartitions` | The default number of log partitions per topic | `1` |
+| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` |
+| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` |
+| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` |
+| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` |
+| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to ZooKeeper | `6000` |
+| `zookeeperChrootPath` | Path which puts data under some path in the global ZooKeeper namespace | `""` |
+| `authorizerClassName` | The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties | `""` |
+| `allowEveryoneIfNoAclFound` | By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users | `true` |
+| `superUsers` | You can add super users in server.properties | `User:admin` |
+| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` |
+| `auth.externalClientProtocol` | Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `""` |
+| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` |
+| `auth.sasl.mechanisms` | SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` |
+| `auth.sasl.interBrokerMechanism` | SASL mechanism for inter broker communication. | `plain` |
+| `auth.sasl.jaas.clientUsers` | Kafka client user list | `["user"]` |
+| `auth.sasl.jaas.clientPasswords` | Kafka client passwords. This is mandatory if more than one user is specified in clientUsers | `[]` |
+| `auth.sasl.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` |
+| `auth.sasl.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `""` |
+| `auth.sasl.jaas.zookeeperUser` | Kafka ZooKeeper user for SASL authentication | `""` |
+| `auth.sasl.jaas.zookeeperPassword` | Kafka ZooKeeper password for SASL authentication | `""` |
+| `auth.sasl.jaas.existingSecret` | Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser | `""` |
+| `auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem` | `jks` |
+| `auth.tls.pemChainIncluded` | Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert. | `false` |
+| `auth.tls.existingSecrets` | Array of existing secrets containing the TLS certificates for the Kafka brokers | `[]` |
+| `auth.tls.autoGenerated` | Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem` | `false` |
+| `auth.tls.password` | Password to access the JKS files or PEM key when they are password-protected. | `""` |
+| `auth.tls.existingSecret` | Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`) | `""` |
+| `auth.tls.jksTruststoreSecret` | Name of the existing secret containing your truststore if truststore not existing or different from the ones in the `auth.tls.existingSecrets` | `""` |
+| `auth.tls.jksKeystoreSAN` | The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate | `""` |
+| `auth.tls.jksTruststore` | The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore | `""` |
+| `auth.tls.endpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` |
+| `auth.zookeeper.tls.enabled` | Enable TLS for Zookeeper client connections. | `false` |
+| `auth.zookeeper.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` |
+| `auth.zookeeper.tls.verifyHostname` | Hostname validation. | `true` |
+| `auth.zookeeper.tls.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications. | `""` |
+| `auth.zookeeper.tls.existingSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore. | `zookeeper.keystore.jks` |
+| `auth.zookeeper.tls.existingSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore. | `zookeeper.truststore.jks` |
+| `auth.zookeeper.tls.passwordsSecret` | Existing secret containing Keystore and Truststore passwords. | `""` |
+| `auth.zookeeper.tls.passwordsSecretKeystoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore. | `keystore-password` |
+| `auth.zookeeper.tls.passwordsSecretTruststoreKey` | The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore. | `truststore-password` |
+| `listeners` | The address(es) the socket server listens on. When auto-calculated, it is set to an empty array | `[]` |
+| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. When auto-calculated, it is set to an empty array | `[]` |
+| `listenerSecurityProtocolMap` | The protocol->listener mapping. When auto-calculated, it is set to nil | `""` |
+| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` |
+| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` |
+| `command` | Override Kafka container command | `["/scripts/setup.sh"]` |
+| `args` | Override Kafka container arguments | `[]` |
+| `extraEnvVars` | Extra environment variables to add to Kafka pods | `[]` |
+| `extraEnvVarsCM` | ConfigMap with extra environment variables | `""` |
+| `extraEnvVarsSecret` | Secret with extra environment variables | `""` |
+
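Tying the auth parameters above together, a sketch that enables SASL for both client and inter-broker traffic (the user names and passwords are placeholders, not defaults this patch ships):

```console
$ helm install my-release my-repo/kafka \
    --set auth.clientProtocol=sasl \
    --set auth.interBrokerProtocol=sasl \
    --set 'auth.sasl.jaas.clientUsers[0]=user' \
    --set 'auth.sasl.jaas.clientPasswords[0]=password' \
    --set auth.sasl.jaas.interBrokerPassword=admin-password
```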
 ### Statefulset parameters

-| Parameter | Description | Default |
-|-----------|-------------|---------|
-| `replicaCount` | Number of Kafka nodes | `1` |
-| `updateStrategy` | Update strategy for the stateful set | `RollingUpdate` |
-| `rollingUpdatePartition` | Partition update strategy | `nil` |
-| `podLabels` | Kafka pod labels | `{}` (evaluated as a template) |
-| `podAnnotations` | Kafka Pod annotations | `{}` (evaluated as a template) |
-| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) |
-| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` |
-| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) |
-| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) |
-| `podSecurityContext` | Kafka pods' Security Context | `{}` |
-| `containerSecurityContext` | Kafka containers' Security Context | `{}` |
-| `resources.limits` | The resources limits for Kafka containers | `{}` |
-| `resources.requests` | The requested resources for Kafka containers | `{}` |
-| `livenessProbe` | Liveness probe configuration for Kafka | `Check values.yaml file` |
-| `readinessProbe` | Readiness probe configuration for Kafka | `Check values.yaml file` |
-| `customLivenessProbe` | Custom Liveness probe configuration for Kafka | `{}` |
-| `customReadinessProbe` | Custom Readiness probe configuration for Kafka | `{}` |
-| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
-| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `nil` |
-| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` |
-| `command` | Override kafka container command | `['/scripts/setup.sh']` (evaluated as a template) |
-| `args` | Override kafka container arguments | `[]` (evaluated as a template) |
-| `sidecars` | Attach additional sidecar containers to the Kafka pod | `{}` |
+| Name | Description | Value |
+| ---- | ----------- | ----- |
+| `replicaCount` | Number of Kafka nodes | `1` |
+| `minBrokerId` | Minimal broker.id value, nodes increment their `broker.id` respectively | `0` |
+| `brokerRackAssignment` | Set Broker Assignment for multi-tenant environments. Allowed values: `aws-az` | `""` |
+| `containerPorts.client` | Kafka client container port | `9092` |
+| `containerPorts.internal` | Kafka inter-broker container port | `9093` |
+| `containerPorts.external` | Kafka external container port | `9094` |
+| `livenessProbe.enabled` | Enable livenessProbe on Kafka containers | `true` |
+| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` |
+| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` |
+| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` |
+| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` |
+| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` |
+| `readinessProbe.enabled` | Enable readinessProbe on Kafka containers | `true` |
+| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` |
+| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` |
+| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` |
+| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` |
+| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` |
+| `startupProbe.enabled` | Enable startupProbe on Kafka containers | `false` |
+| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` |
+| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` |
+| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` |
+| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` |
+| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` |
+| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` |
+| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` |
+| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` |
+| `lifecycleHooks` | lifecycleHooks for the Kafka container to automate configuration before or after startup | `{}` |
+| `resources.limits` | The resources limits for the container | `{}` |
+| `resources.requests` | The requested resources for the container | `{}` |
+| `podSecurityContext.enabled` | Enable security context for the pods | `true` |
+| `podSecurityContext.fsGroup` | Set Kafka pod's Security Context fsGroup | `1001` |
+| `containerSecurityContext.enabled` | Enable Kafka containers' Security Context | `true` |
+| `containerSecurityContext.runAsUser` | Set Kafka containers' Security Context runAsUser | `1001` |
+| `containerSecurityContext.runAsNonRoot` | Set Kafka containers' Security Context runAsNonRoot | `true` |
+| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` |
+| `hostAliases` | Kafka pods host aliases | `[]` |
+| `hostNetwork` | Specify if host network should be enabled for Kafka pods | `false` |
+| `hostIPC` | Specify if host IPC should be enabled for Kafka pods | `false` |
+| `podLabels` | Extra labels for Kafka pods | `{}` |
+| `podAnnotations` | Extra annotations for Kafka pods | `{}` |
+| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` |
+| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` |
+| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` |
+| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` |
+| `affinity` | Affinity for pod assignment | `{}` |
+| `nodeSelector` | Node labels for pod assignment | `{}` |
+| `tolerations` | Tolerations for pod assignment | `[]` |
+| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` |
+| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` |
+| `podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` |
+| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` |
+| `schedulerName` | Name of the k8s scheduler (other than default) | `""` |
+| `updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` |
+| `updateStrategy.rollingUpdate` | Kafka statefulset rolling update configuration parameters | `{}` |
+| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` |
+| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` |
+| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` |
+| `initContainers` | Add additional init containers to the Kafka pod(s) | `[]` |
+| `pdb.create` | Deploy a pdb object for the Kafka pod | `false` |
+| `pdb.minAvailable` | Minimum number/percentage of available Kafka replicas | `""` |
+| `pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` |
+
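For a production-leaning spread of the brokers described above, one sketch combines replicas, hard anti-affinity and a disruption budget (values are illustrative, not chart defaults):

```console
$ helm install my-release my-repo/kafka \
    --set replicaCount=3 \
    --set podAntiAffinityPreset=hard \
    --set pdb.create=true \
    --set pdb.maxUnavailable=1
```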
| `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to gracefully terminate | `""` | +| `podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `updateStrategy.type` | Kafka statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | Kafka statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the Kafka pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the Kafka pod(s) | `[]` | +| `initContainers` | Add additional Add init containers to the Kafka pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the Kafka pod | `false` | +| `pdb.minAvailable` | Maximum number/percentage of unavailable Kafka replicas | `""` | +| `pdb.maxUnavailable` | Maximum number/percentage of unavailable Kafka replicas | `1` | -### Exposure parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `service.type` | Kubernetes Service type | `ClusterIP` | -| `service.port` | Kafka port for client connections | `9092` | -| `service.internalPort` | Kafka port for inter-broker connections | `9093` | -| `service.externalPort` | Kafka port for external connections | `9094` | -| `service.nodePorts.client` | Nodeport for client connections | `""` | -| `service.nodePorts.external` | Nodeport for external connections | `""` | -| `service.loadBalancerIP` | loadBalancerIP for Kafka Service | `nil` | -| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `service.annotations` | Service annotations | `{}`(evaluated as a template) | -| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | -| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | -| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry (kubectl) | `docker.io` | -| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image name (kubectl) | `bitnami/kubectl` | -| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (kubectl) | `{TAG_NAME}` | -| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy (kubectl) | `Always` | -| `externalAccess.autoDiscovery.resources.limits` | Init container 
auto-discovery resource limits | `{}` | -| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | -| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` | -| `externalAccess.service.port` | Kafka port used for external access when service type is LoadBalancer | `9094` | -| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for Kafka brokers | `[]` | -| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `nil` | -| `externalAccess.service.nodePorts` | Array of node ports used to configure Kafka external listener when service type is NodePort | `[]` | -| `externalAccess.service.annotations` | Service annotations for external access | `{}`(evaluated as a template) | +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | --------------------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | Kafka svc port for client connections | `9092` | +| `service.ports.internal` | Kafka svc port for inter-broker connections | `9093` | +| `service.ports.external` | Kafka svc port for external connections | `9094` | +| `service.nodePorts.client` | Node port for the Kafka client connections | `""` | +| `service.nodePorts.external` | Node port for the Kafka external connections | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | Kafka service Cluster IP | `""` | +| `service.loadBalancerIP` | Kafka service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | Kafka service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | Kafka service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for Kafka service | `{}` | +| `service.headless.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `service.headless.labels` | Labels for the headless service. | `{}` | +| `service.extraPorts` | Extra ports to expose in the Kafka service (normally used with the `sidecar` value) | `[]` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image repository | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (immutable tags are recommended) | `1.25.6-debian-11-r1` | +| `externalAccess.autoDiscovery.image.digest` | Petete image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy | `IfNotPresent` | +| `externalAccess.autoDiscovery.image.pullSecrets` | Init container auto-discovery image pull secrets | `[]` | +| `externalAccess.autoDiscovery.resources.limits` | The resources limits for the auto-discovery init container | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | The requested resources for the auto-discovery init container | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.service.ports.external` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerNames` | Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerAnnotations` | Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.nodePorts` | Array of node ports used for each Kafka broker. Length must be the same as replicaCount | `[]` | +| `externalAccess.service.useHostIPs` | Use service host IPs to configure Kafka external listener when service type is NodePort | `false` | +| `externalAccess.service.usePodIPs` | using the MY_POD_IP address for external access. | `false` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort or ClusterIP | `""` | +| `externalAccess.service.publishNotReadyAddresses` | Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready | `false` | +| `externalAccess.service.labels` | Service labels for external access | `{}` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.extraPorts` | Extra ports to expose in the Kafka external service | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.externalAccess.from` | customize the from section for External Access on tcp-external port | `[]` | +| `networkPolicy.egressRules.customRules` | Custom network policy rule | `{}` | + ### Persistence parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `persistence.enabled` | Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected | `true` | -| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template | `nil` | -| `persistence.storageClass` | PVC Storage Class for Kafka data 
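As one way to combine the external-access parameters above, a NodePort sketch for a single broker (the domain and node port are placeholders; the `nodePorts` array length must match `replicaCount`):

```console
$ helm install my-release my-repo/kafka \
    --set externalAccess.enabled=true \
    --set externalAccess.service.type=NodePort \
    --set externalAccess.service.domain=kafka.example.com \
    --set 'externalAccess.service.nodePorts[0]=30092'
```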
volume | `nil` | -| `persistence.accessMode` | PVC Access Mode for Kafka data volume | `ReadWriteOnce` | -| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | -| `persistence.annotations` | Annotations for the PVC | `{}`(evaluated as a template) | +| Name | Description | Value | +| ------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------- | ------------------------- | +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `""` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `persistence.mountPath` | Mount path of the Kafka data volume | `/bitnami/kafka` | +| `logPersistence.enabled` | Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected | `false` | +| `logPersistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `logPersistence.storageClass` | PVC Storage Class for Kafka logs volume | `""` | +| `logPersistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `logPersistence.size` | PVC Storage Request for Kafka logs volume | `8Gi` | +| `logPersistence.annotations` | Annotations for the PVC | `{}` | +| `logPersistence.selector` | Selector to match an existing Persistent Volume for Kafka log data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `logPersistence.mountPath` | Mount path of the Kafka logs volume | `/opt/bitnami/kafka/logs` | -### RBAC parameters - -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | -| `serviceAccount.name` | Name of the created serviceAccount | Generated using the `kafka.fullname` template | -| `rbac.create` | Weather to create & use RBAC resources or not | `false` | ### Volume Permissions parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | -| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | -| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | -| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| Name | Description | Value | +| ------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image repository | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag (immutable tags are recommended) | `11-debian-11-r75` | +| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | Init container volume-permissions image pull secrets | `[]` | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | User ID for the init container | `0` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + ### Metrics parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | -| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | -| `metrics.kafka.image.repository` | Kafka exporter image name | `bitnami/kafka-exporter` | -| `metrics.kafka.image.tag` | Kafka exporter image tag | `{TAG_NAME}` | -| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | -| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | -| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | -| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `nil` | -| `metrics.kafka.resources.limits` | Kafka Exporter container resource limits | `{}` | -| `metrics.kafka.resources.requests` | Kafka Exporter container resource requests | `{}` | -| `metrics.kafka.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter | `ClusterIP` | -| `metrics.kafka.service.port` | Kafka Exporter Prometheus port | `9308` | -| `metrics.kafka.service.nodePort` | Kubernetes HTTP node port | `""` | -| `metrics.kafka.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | -| `metrics.kafka.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | -| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `nil` | -| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | -| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | -| `metrics.jmx.image.repository` | JMX exporter image name | `bitnami/jmx-exporter` | -| `metrics.jmx.image.tag` | JMX exporter image tag | 
`{TAG_NAME}` | -| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | -| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | -| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | -| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | -| `metrics.jmx.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` | -| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` | -| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` | -| `metrics.jmx.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | -| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | -| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `nil` | -| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX Exporter | (see `values.yaml`) | -| `metrics.jmx.config` | Configuration file for JMX exporter | (see `values.yaml`) | -| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `nil` | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `monitoring` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `nil` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | -| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `nil` (Prometheus Operator default value) | +| Name | Description | Value | +| ----------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------- | +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image repository | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag (immutable tags are recommended) | `1.6.0-debian-11-r52` | +| `metrics.kafka.image.digest` | Kafka exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `""` | +| `metrics.kafka.tlsCert` | The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) | `cert-file` | +| `metrics.kafka.tlsKey` | The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) | `key-file` | +| `metrics.kafka.tlsCaSecret` | Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication | `""` | +| `metrics.kafka.tlsCaCert` | The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) | `ca-file` | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.command` | Override Kafka exporter container command | `[]` | +| `metrics.kafka.args` | Override Kafka exporter container arguments | `[]` | +| `metrics.kafka.containerPorts.metrics` | Kafka exporter metrics container port | `9308` | +| `metrics.kafka.resources.limits` | The resources limits for the container | `{}` | +| `metrics.kafka.resources.requests` | The requested resources for the container | `{}` | +| `metrics.kafka.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `metrics.kafka.podSecurityContext.fsGroup` | Set Kafka exporter pod's Security Context fsGroup | `1001` | +| `metrics.kafka.containerSecurityContext.enabled` | Enable Kafka exporter containers' Security Context | `true` | +| `metrics.kafka.containerSecurityContext.runAsUser` | Set Kafka exporter containers' Security Context runAsUser | `1001` | +| `metrics.kafka.containerSecurityContext.runAsNonRoot` | Set Kafka exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.kafka.hostAliases` | Kafka exporter pods host aliases | `[]` | +| `metrics.kafka.podLabels` | Extra labels for Kafka exporter pods | `{}` | +| `metrics.kafka.podAnnotations` | Extra annotations for Kafka exporter pods | `{}` | +| `metrics.kafka.podAffinityPreset` | Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `metrics.kafka.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `metrics.kafka.nodeAffinityPreset.key` | Node label key to match Ignored if `metrics.kafka.affinity` is set. | `""` | +| `metrics.kafka.nodeAffinityPreset.values` | Node label values to match. Ignored if `metrics.kafka.affinity` is set. 
| `[]` | +| `metrics.kafka.affinity` | Affinity for pod assignment | `{}` | +| `metrics.kafka.nodeSelector` | Node labels for pod assignment | `{}` | +| `metrics.kafka.tolerations` | Tolerations for pod assignment | `[]` | +| `metrics.kafka.schedulerName` | Name of the k8s scheduler (other than default) for Kafka exporter | `""` | +| `metrics.kafka.priorityClassName` | Kafka exporter pods' priorityClassName | `""` | +| `metrics.kafka.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `metrics.kafka.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) | `[]` | +| `metrics.kafka.sidecars` | Add additional sidecar containers to the Kafka exporter pod(s) | `[]` | +| `metrics.kafka.initContainers` | Add init containers to the Kafka exporter pods | `[]` | +| `metrics.kafka.service.ports.metrics` | Kafka exporter metrics service port | `9308` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.kafka.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.kafka.service.annotations` | Annotations for the Kafka exporter service | `{}` | +| `metrics.kafka.serviceAccount.create` | Enable creation of ServiceAccount for Kafka exporter pods | `true` | +| `metrics.kafka.serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `metrics.kafka.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image repository | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag (immutable tags are recommended) | `0.17.2-debian-11-r41` | +| `metrics.jmx.image.digest` | JMX exporter image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `metrics.jmx.containerSecurityContext.enabled` | Enable Prometheus JMX exporter containers' Security Context | `true` | +| `metrics.jmx.containerSecurityContext.runAsUser` | Set Prometheus JMX exporter containers' Security Context runAsUser | `1001` | +| `metrics.jmx.containerSecurityContext.runAsNonRoot` | Set Prometheus JMX exporter containers' Security Context runAsNonRoot | `true` | +| `metrics.jmx.containerPorts.metrics` | Prometheus JMX exporter metrics container port | `5556` | +| `metrics.jmx.resources.limits` | The resources limits for the JMX exporter container | `{}` | +| `metrics.jmx.resources.requests` | The requested resources for the JMX exporter container | `{}` | +| `metrics.jmx.service.ports.metrics` | Prometheus JMX exporter metrics service port | `5556` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `""` | +| `metrics.jmx.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `metrics.jmx.service.annotations` | Annotations for the Prometheus JMX exporter service | `{}` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX exporter | `["kafka.controller:*","kafka.server:*","java.lang:*","kafka.network:*","kafka.log:*"]` | +| `metrics.jmx.config` | Configuration file for JMX exporter | `""` | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `""` | +| `metrics.jmx.extraRules` | Add extra rules to JMX exporter configuration | `""` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. 
| `""` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.prometheusRule.labels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.groups` | Prometheus Rule Groups for Kafka | `[]` | -### Zookeeper chart parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `zookeeper.enabled` | Switch to enable or disable the Zookeeper helm chart | `true` | -| `zookeeper.persistence.enabled` | Enable Zookeeper persistence using PVC | `true` | -| `externalZookeeper.servers` | Server or list of external Zookeeper servers to use | `[]` | +### Kafka provisioning parameters + +| Name | Description | Value | +| ---------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `provisioning.enabled` | Enable kafka provisioning Job | `false` | +| `provisioning.numPartitions` | Default number of partitions for topics when unspecified | `1` | +| `provisioning.replicationFactor` | Default replication factor for topics when unspecified | `1` | +| `provisioning.topics` | Kafka topics to provision | `[]` | +| `provisioning.nodeSelector` | Node labels for pod assignment | `{}` | +| `provisioning.tolerations` | Tolerations for pod assignment | `[]` | +| `provisioning.extraProvisioningCommands` | Extra commands to run to provision cluster resources | `[]` | +| `provisioning.parallel` | Number of provisioning commands to run at the same time | `1` | +| `provisioning.preScript` | Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.postScript` | Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations | `""` | +| `provisioning.auth.tls.type` | Format to use for TLS certificates. Allowed types: `jks` and `pem`. | `jks` | +| `provisioning.auth.tls.certificatesSecret` | Existing secret containing the TLS certificates for the Kafka provisioning Job. | `""` | +| `provisioning.auth.tls.cert` | The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) | `tls.crt` | +| `provisioning.auth.tls.key` | The secret key from the certificatesSecret if 'key' key different from the default (tls.key) | `tls.key` | +| `provisioning.auth.tls.caCert` | The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) | `ca.crt` | +| `provisioning.auth.tls.keystore` | The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) | `keystore.jks` | +| `provisioning.auth.tls.truststore` | The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) | `truststore.jks` | +| `provisioning.auth.tls.passwordsSecret` | Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. 
| `""` | +| `provisioning.auth.tls.keyPasswordSecretKey` | The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) | `key-password` | +| `provisioning.auth.tls.keystorePasswordSecretKey` | The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) | `keystore-password` | +| `provisioning.auth.tls.truststorePasswordSecretKey` | The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) | `truststore-password` | +| `provisioning.auth.tls.keyPassword` | Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.keystorePassword` | Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.auth.tls.truststorePassword` | Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. | `""` | +| `provisioning.command` | Override provisioning container command | `[]` | +| `provisioning.args` | Override provisioning container arguments | `[]` | +| `provisioning.extraEnvVars` | Extra environment variables to add to the provisioning pod | `[]` | +| `provisioning.extraEnvVarsCM` | ConfigMap with extra environment variables | `""` | +| `provisioning.extraEnvVarsSecret` | Secret with extra environment variables | `""` | +| `provisioning.podAnnotations` | Extra annotations for Kafka provisioning pods | `{}` | +| `provisioning.podLabels` | Extra labels for Kafka provisioning pods | `{}` | +| `provisioning.serviceAccount.create` | Enable creation of ServiceAccount for Kafka provisioning pods | `false` | +| `provisioning.serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | `""` | +| `provisioning.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `provisioning.resources.limits` | The resources limits for the Kafka provisioning container | `{}` | +| `provisioning.resources.requests` | The requested resources for the Kafka provisioning container | `{}` | +| `provisioning.podSecurityContext.enabled` | Enable security context for the pods | `true` | +| `provisioning.podSecurityContext.fsGroup` | Set Kafka provisioning pod's Security Context fsGroup | `1001` | +| `provisioning.containerSecurityContext.enabled` | Enable Kafka provisioning containers' Security Context | `true` | +| `provisioning.containerSecurityContext.runAsUser` | Set Kafka provisioning containers' Security Context runAsUser | `1001` | +| `provisioning.containerSecurityContext.runAsNonRoot` | Set Kafka provisioning containers' Security Context runAsNonRoot | `true` | +| `provisioning.schedulerName` | Name of the k8s scheduler (other than default) for kafka provisioning | `""` | +| `provisioning.extraVolumes` | Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) | `[]` | +| `provisioning.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) | `[]` | +| `provisioning.sidecars` | Add additional sidecar containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.initContainers` | Add additional Add init containers to the Kafka provisioning pod(s) | `[]` | +| `provisioning.waitForKafka` | If true use an init container to wait until kafka is ready before starting provisioning | `true` | + + +### ZooKeeper chart parameters + +| Name | Description | Value | +| --------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `zookeeper.enabled` | Switch to enable or disable the ZooKeeper helm chart | `true` | +| `zookeeper.replicaCount` | Number of ZooKeeper nodes | `1` | +| `zookeeper.auth.client.enabled` | Enable ZooKeeper auth | `false` | +| `zookeeper.auth.client.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.client.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `zookeeper.auth.client.serverUsers` | Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" | `""` | +| `zookeeper.auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" | `""` | +| `zookeeper.persistence.enabled` | Enable persistence on ZooKeeper using PVC(s) | `true` | +| `zookeeper.persistence.storageClass` | Persistent Volume storage class | `""` | +| `zookeeper.persistence.accessModes` | Persistent Volume access modes | `["ReadWriteOnce"]` | +| `zookeeper.persistence.size` | Persistent Volume size | `8Gi` | +| `externalZookeeper.servers` | List of external zookeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. | `[]` | + Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, ```console -helm install my-release \ +$ helm install my-release \ --set replicaCount=3 \ - bitnami/kafka + my-repo/kafka ``` The above command deploys Kafka with 3 brokers (replicas). @@ -280,7 +506,7 @@ The above command deploys Kafka with 3 brokers (replicas). Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```console -helm install my-release -f values.yaml bitnami/kafka +$ helm install my-release -f values.yaml my-repo/kafka ``` > **Tip**: You can use the default [values.yaml](values.yaml) @@ -293,121 +519,20 @@ It is strongly recommended to use immutable tags in a production environment. Th Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. -### Production configuration and horizontal scaling - -This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. - -- Number of Kafka nodes: - -```diff -- replicaCount: 1 -+ replicaCount: 3 -``` - -- Allow to use the PLAINTEXT listener: - -```diff -- allowPlaintextListener: true -+ allowPlaintextListener: false -``` - -- Default replication factors for automatically created topics: - -```diff -- defaultReplicationFactor: 1 -+ defaultReplicationFactor: 3 -``` - -- Allow auto creation of topics. - -```diff -- autoCreateTopicsEnable: true -+ autoCreateTopicsEnable: false -``` - -- The replication factor for the offsets topic: - -```diff -- offsetsTopicReplicationFactor: 1 -+ offsetsTopicReplicationFactor: 3 -``` - -- The replication factor for the transaction topic: - -```diff -- transactionStateLogReplicationFactor: 1 -+ transactionStateLogReplicationFactor: 3 -``` - -- Overridden min.insync.replicas config for the transaction topic: - -```diff -- transactionStateLogMinIsr: 1 -+ transactionStateLogMinIsr: 3 -``` - -- Switch to enable the Kafka SASAL authentication on client and inter-broker communications: - -```diff -- auth.clientProtocol: plaintext -+ auth.clientProtocol: sasl -- auth.interBrokerProtocol: plaintext -+ auth.interBrokerProtocol: sasl -``` - -- Enable Zookeeper authentication: - -```diff -+ auth.jaas.zookeeperUser: zookeeperUser -+ auth.jaas.zookeeperPassword: zookeeperPassword -- zookeeper.auth.enabled: false -+ zookeeper.auth.enabled: true -+ zookeeper.auth.clientUser: zookeeperUser -+ zookeeper.auth.clientPassword: zookeeperPassword -+ zookeeper.auth.serverUsers: zookeeperUser -+ zookeeper.auth.serverPasswords: zookeeperPassword -``` - -- Enable Pod Disruption Budget: - -```diff -- pdb.create: false -+ pdb.create: true -``` - -- Create a separate Kafka metrics exporter: - -```diff -- metrics.kafka.enabled: false -+ metrics.kafka.enabled: true -``` - -- Expose JMX metrics to Prometheus: - -```diff -- metrics.jmx.enabled: false -+ metrics.jmx.enabled: true -``` - -- Enable Zookeeper metrics: - -```diff -+ zookeeper.metrics.enabled: true -``` - -To horizontally scale this chart once it has been deployed, you can upgrade the statefulset using a new value for the `replicaCount` parameter. Please note that, when enabling TLS encryption, you must update your JKS secret including the keystore for the new replicas. - ### Setting custom parameters Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. 
For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. +Using `extraEnvVars` with `KAFKA_CFG_` is the preferred and simplest way to add custom Kafka parameters not otherwise specified in this chart. Alternatively, you can provide a *full* Kafka configuration using `config` or `existingConfigmap`. +Setting either `config` or `existingConfigmap` will cause the chart to disregard `KAFKA_CFG_` settings, which are used by many other Kafka-related chart values described above, as well as dynamically generated parameters such as `zookeeper.connect`. This can cause unexpected behavior. + ### Listeners configuration This chart allows you to automatically configure Kafka with 3 listeners: - One for inter-broker communications. - A second one for communications with clients within the K8s cluster. -- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-clusters) for more information. +- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-cluster) for more information. For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. @@ -415,33 +540,43 @@ For more complex configurations, set the `listeners`, `advertisedListeners` and You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. This table shows the available protocols and the security they provide: -| Method | Authentication | Encryption via TLS | -|-----------|-------------------------------|--------------------| -| plaintext | None | No | -| tls | None | Yes | -| mtls | Yes (two-way authentication) | Yes | -| sasl | Yes (via SASL) | No | -| sasl_tls | Yes (via SASL) | Yes | +| Method | Authentication | Encryption via TLS | +|-----------|------------------------------|--------------------| +| plaintext | None | No | +| tls | None | Yes | +| mtls | Yes (two-way authentication) | Yes | +| sasl | Yes (via SASL) | No | +| sasl_tls | Yes (via SASL) | Yes | + +Learn more about how to configure Kafka to use the different authentication protocols in the [chart documentation](https://docs.bitnami.com/kubernetes/infrastructure/kafka/administration/enable-security/). If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below: -- `auth.jaas.clientUsers`/`auth.jaas.clientPasswords`: when enabling SASL authentication for communications with clients. -- `auth.jaas.interBrokerUser`/`auth.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications. +- `auth.sasl.jaas.clientUsers`/`auth.sasl.jaas.clientPasswords`: when enabling SASL authentication for communications with clients. +- `auth.sasl.jaas.interBrokerUser`/`auth.sasl.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications. - `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: In the case that the Zookeeper chart is deployed with SASL authentication enabled. 
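+As a quick illustration, an equivalent `values.yaml` snippet enabling SASL for client connections might look like the sketch below (`brokerUser` and `brokerPassword` are placeholder credentials, not chart defaults): + +```yaml +auth: + clientProtocol: sasl + sasl: + jaas: + clientUsers: + - brokerUser + clientPasswords: + - brokerPassword +``` +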
-In order to configure TLS authentication/encryption, you **must** create a secret containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. Then, you need pass the secret name with the `--auth.jksSecret` parameter when deploying the chart. +In order to configure TLS authentication/encryption, you **can** create a secret per Kafka broker you have in the cluster containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and the keystore (`kafka.keystore.jks`). Then, you need to pass the secret names with the `auth.tls.existingSecrets` parameter when deploying the chart. -> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.jksPassword` parameter to provide your password. +> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.tls.password` parameter to provide your password. -For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers use the command below to create the secret: +For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers use the commands below to create the secrets: ```console -kubectl create secret generic kafka-jks --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks +$ kubectl create secret generic kafka-jks-0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks +$ kubectl create secret generic kafka-jks-1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks ``` -> **Note**: the command above assumes you already created the trustore and keystores files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation. +> **Note**: the commands above assume you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation. -As an alternative to manually create the secret before installing the chart, you can put your JKS files inside the chart folder `files/jks`, an a secret including them will be generated. Please note this alternative requires to have the chart downloaded locally, so you will have to clone this repository or fetch the chart before installing it. +If, for some reason (like using Cert-Manager), you cannot use the default JKS secret scheme, you can use the additional parameters: + +- `auth.tls.jksTruststoreSecret` to define an additional secret where the `kafka.truststore.jks` is kept. The truststore password **must** be the same as in `auth.tls.password` +- `auth.tls.jksTruststore` to overwrite the default value of the truststore key (`kafka.truststore.jks`). +- `auth.tls.jksKeystoreSAN` if you want to use a SAN certificate for your brokers.
Setting this parameter would mean that the chart expects an existing key in the `auth.tls.jksTruststoreSecret` with the `auth.tls.jksKeystoreSAN` value and uses it as the keystore for **all** brokers +> **Note**: If you are using cert-manager, particularly when an ACME issuer is used, the `ca.crt` field is not put in the `Secret` that cert-manager creates. To handle this, the `auth.tls.pemChainIncluded` property can be set to `true` and the initContainer created by this Chart will attempt to extract the intermediate certs from the `tls.crt` field of the secret (which is a PEM chain) + +> **Note**: The truststore/keystore from above **must** be protected with the same password as in `auth.tls.password` You can deploy the chart with authentication using the following parameters: @@ -449,12 +584,13 @@ You can deploy the chart with authentication using the following parameters: replicaCount=2 auth.clientProtocol=sasl auth.interBrokerProtocol=tls -auth.certificatesSecret=kafka-jks -auth.certificatesPassword=jksPassword -auth.jaas.clientUsers[0]=brokerUser -auth.jaas.clientPassword[0]=brokerPassword -auth.jaas.zookeeperUser=zookeeperUser -auth.jaas.zookeeperPassword=zookeeperPassword +auth.tls.existingSecrets[0]=kafka-jks-0 +auth.tls.existingSecrets[1]=kafka-jks-1 +auth.tls.password=jksPassword +auth.sasl.jaas.clientUsers[0]=brokerUser +auth.sasl.jaas.clientPasswords[0]=brokerPassword +auth.sasl.jaas.zookeeperUser=zookeeperUser +auth.sasl.jaas.zookeeperPassword=zookeeperPassword zookeeper.auth.enabled=true zookeeper.auth.serverUsers=zookeeperUser zookeeper.auth.serverPasswords=zookeeperPassword @@ -462,7 +598,36 @@ zookeeper.auth.clientUser=zookeeperUser zookeeper.auth.clientPassword=zookeeperPassword ``` -If you also enable exposing metrics using the Kafka expoter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificated used to sign the brokers certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags: +You can deploy the chart with AclAuthorizer using the following parameters: + +```console +replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=sasl_tls +auth.tls.existingSecrets[0]=kafka-jks-0 +auth.tls.existingSecrets[1]=kafka-jks-1 +auth.tls.password=jksPassword +auth.sasl.jaas.clientUsers[0]=brokerUser +auth.sasl.jaas.clientPasswords[0]=brokerPassword +auth.sasl.jaas.zookeeperUser=zookeeperUser +auth.sasl.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +authorizerClassName=kafka.security.authorizer.AclAuthorizer +allowEveryoneIfNoAclFound=false +superUsers=User:admin +``` + +If you are using Kafka ACLs, you might encounter the following event in kafka-authorizer.log: `[...] Principal = User:ANONYMOUS is Allowed Operation [...]`. + +Setting the parameter `auth.clientProtocol=mtls` will set the Kafka configuration `ssl.client.auth=required`. This option will require the clients to authenticate to Kafka brokers. + +As a result, we will be able to see the event's specific Subject in kafka-authorizer.log: `[...] Principal = User:CN=kafka,OU=...,O=...,L=...,C=..,ST=... is [...]`.
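+ +Once ACLs are enforced, you can inspect them from inside a broker pod using the `kafka-acls.sh` tool shipped with Kafka; a sketch, assuming a release named `my-release` and a client properties file with valid credentials already copied into the pod (both are placeholders): + +```console +$ kubectl exec -it my-release-kafka-0 -- kafka-acls.sh --bootstrap-server localhost:9092 --command-config /tmp/client.properties --list +```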
+ +If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the brokers' certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags: ```console metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} @@ -472,7 +637,7 @@ metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} In order to access Kafka Brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per kafka pod will be created. -There are two ways of configuring external access. Using LoadBalancer services or using NodePort services. +There are three ways of configuring external access. Using LoadBalancer services, using NodePort services or using ClusterIP services. #### Using LoadBalancer services @@ -483,7 +648,7 @@ You have two alternatives to use LoadBalancer services: ```console externalAccess.enabled=true externalAccess.service.type=LoadBalancer -externalAccess.service.port=9094 +externalAccess.service.ports.external=9094 externalAccess.autoDiscovery.enabled=true serviceAccount.create=true rbac.create=true @@ -496,13 +661,15 @@ Note: This option requires creating RBAC rules on clusters where RBAC policies a ```console externalAccess.enabled=true externalAccess.service.type=LoadBalancer -externalAccess.service.port=9094 +externalAccess.service.ports.external=9094 externalAccess.service.loadBalancerIPs[0]='external-ip-1' externalAccess.service.loadBalancerIPs[1]='external-ip-2'} ``` Note: You need to know in advance the load balancer IPs so each Kafka broker advertised listener is configured with it. +Following the aforementioned steps will also allow you to connect to the brokers from the outside using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections. + #### Using NodePort services You have two alternatives to use NodePort services: @@ -524,15 +691,44 @@ Note: This option requires creating RBAC rules on clusters where RBAC policies a ```console externalAccess.enabled=true externalAccess.service.type=NodePort -externalAccess.serivce.nodePorts[0]='node-port-1' -externalAccess.serivce.nodePorts[1]='node-port-2' +externalAccess.service.nodePorts[0]='node-port-1' +externalAccess.service.nodePorts[1]='node-port-2' ``` Note: You need to know in advance the node ports that will be exposed so each Kafka broker advertised listener is configured with it. -The pod will try to get the external ip of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` is provided. +The pod will try to get the external ip of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` or `externalAccess.service.useHostIPs` is provided. -Following the aforementioned steps will also allow to connect the brokers from the outside using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections.
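+ +To double-check which per-broker external services the chart created, you can list the services that belong to the release; a sketch, assuming the release is named `my-release`: + +```console +$ kubectl get svc -l app.kubernetes.io/instance=my-release +```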
+#### Using ClusterIP services + +Note: This option requires that an ingress is deployed within your cluster + +```console +externalAccess.enabled=true +externalAccess.service.type=ClusterIP +externalAccess.service.ports.external=9094 +externalAccess.service.domain='ingress-ip' +``` + +Note: the deployed ingress must contain the following block: + +```console +tcp: + 9094: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-0-external:9094" + 9095: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-1-external:9094" + 9096: "{{ .Release.Namespace }}/{{ include "kafka.fullname" . }}-2-external:9094" +``` + +#### Name resolution with External-DNS + +You can use the following values to generate External-DNS annotations which automatically create DNS records for each ReplicaSet pod: + +```yaml +externalAccess: + service: + annotations: + external-dns.alpha.kubernetes.io/hostname: "{{ .targetPod }}.example.com" +``` ### Sidecars @@ -548,28 +744,35 @@ sidecars: containerPort: 1234 ``` +### Setting Pod's affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). + +As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + ### Deploying extra resources -There are cases where you may want to deploy extra objects, such as Kafka Connect. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB: +There are cases where you may want to deploy extra objects, such as Kafka Connect. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB®: ```yaml ## Extra objects to deploy (value evaluated as a template) ## -extraDeploy: |- - - apiVersion: apps/v1 +extraDeploy: + - | + apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "kafka.fullname" . }}-connect - labels: {{- include "kafka.labels" . | nindent 6 }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: connector spec: replicas: 1 selector: - matchLabels: {{- include "kafka.matchLabels" . | nindent 8 }} + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} app.kubernetes.io/component: connector template: metadata: - labels: {{- include "kafka.labels" . | nindent 10 }} + labels: {{- include "common.labels.standard" . | nindent 8 }} app.kubernetes.io/component: connector spec: containers: @@ -581,16 +784,17 @@ extraDeploy: |- containerPort: 8083 volumeMounts: - name: configuration - mountPath: /opt/bitnami/kafka/config + mountPath: /bitnami/kafka/config volumes: - name: configuration configMap: name: {{ include "kafka.fullname" . }}-connect - - apiVersion: v1 + - | + apiVersion: v1 kind: ConfigMap metadata: name: {{ include "kafka.fullname" . }}-connect - labels: {{- include "kafka.labels" .
| nindent 6 }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: connector data: connect-standalone.properties: |- @@ -599,18 +803,19 @@ extraDeploy: |- mongodb.properties: |- connection.uri=mongodb://root:password@mongodb-hostname:27017 ... - - apiVersion: v1 + - | + apiVersion: v1 kind: Service metadata: name: {{ include "kafka.fullname" . }}-connect - labels: {{- include "kafka.labels" . | nindent 6 }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: connector spec: ports: - protocol: TCP port: 8083 targetPort: connector - selector: {{- include "kafka.matchLabels" . | nindent 6 }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} app.kubernetes.io/component: connector ``` @@ -618,7 +823,7 @@ You can create the Kafka Connect image using the Dockerfile below: ```Dockerfile FROM bitnami/kafka:latest -# Download MongoDB Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +# Download MongoDB® Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb RUN mkdir -p /opt/bitnami/kafka/plugins && \ cd /opt/bitnami/kafka/plugins && \ curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar @@ -627,7 +832,7 @@ CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/conne ## Persistence -The [Bitnami Kafka](https://github.com/bitnami/bitnami-docker-kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. +The [Bitnami Kafka](https://github.com/bitnami/containers/tree/main/bitnami/kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence. @@ -640,11 +845,103 @@ As an alternative, this chart supports using an initContainer to change the owne You can enable this initContainer by setting `volumePermissions.enabled` to `true`. +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + ## Upgrading +### To 20.0.0 + +This major updates the Zookeeper subchart to its newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100). + +### To 19.0.0 + +This major updates Kafka to its newest version, 3.3.x. For more information, please refer to [kafka upgrade notes](https://kafka.apache.org/33/documentation.html#upgrade). + +### To 18.0.0 + +This major updates the Zookeeper subchart to its newest major, 10.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1000). + +### To 16.0.0 + +This major updates the Zookeeper subchart to its newest major, 9.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-900). + +### To 15.0.0 + +This major release bumps Kafka major version to `3.x` series.
+It also renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository. Some affected values are: + +- `service.port`, `service.internalPort` and `service.externalPort` have been regrouped under the `service.ports` map. +- `metrics.kafka.service.port` has been regrouped under the `metrics.kafka.service.ports` map. +- `metrics.jmx.service.port` has been regrouped under the `metrics.jmx.service.ports` map. +- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map. +- Several parameters marked as deprecated in `14.x.x` are not supported anymore. + +Additionally updates the ZooKeeper subchart to its newest major, `8.0.0`, which contains similar changes. + +### To 14.0.0 + +In this version, the `image` block is defined once and is used in the different templates, while in the previous version, the `image` block was duplicated for the main container and the provisioning one. + +```yaml +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +``` + +VS + +```yaml +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +... +provisioning: + image: + registry: docker.io + repository: bitnami/kafka + tag: 2.8.0 +``` + +See [PR#7114](https://github.com/bitnami/charts/pull/7114) for more info about the implemented changes. + +### To 13.0.0 + +This major updates the Zookeeper subchart to its newest major, 7.0.0, which renames all TLS-related settings. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-700). + +### To 12.2.0 + +This version also introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm) as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please, make sure that you have updated the chart dependencies before executing any upgrade. + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Move dependency information from the *requirements.yaml* to the *Chart.yaml* +- After running `helm dependency update`, a *Chart.lock* file is generated containing the same structure used in the previous *requirements.lock* +- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ + ### To 11.8.0 -External access to brokers can now be archived through the cluster's Kafka service. +External access to brokers can now be achieved through the cluster's Kafka service. - `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external` @@ -657,7 +954,7 @@ The way to configure the users and passwords changed. Now it is allowed to creat ### To 11.0.0 -The way to configure listeners and authentication on Kafka is totally refactored allowing users to configure different authentication protocols on different listeners. Please check the sections [Listeners Configuration](listeners-configuration) and [Listeners Configuration](enable-kafka-for-kafka-and-zookeeper) for more information. +The way to configure listeners and authentication on Kafka is totally refactored allowing users to configure different authentication protocols on different listeners. Please check the [Listeners Configuration](#listeners-configuration) section for more information. Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones on this major version: @@ -712,8 +1009,8 @@ Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka: ```console -helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false -helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true +$ helm upgrade kafka my-repo/kafka --version 6.1.8 --set metrics.kafka.enabled=false +$ helm upgrade kafka my-repo/kafka --version 7.0.0 --set metrics.kafka.enabled=true ``` ### To 2.0.0 Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. Use the workaround below to upgrade from versions previous to 2.0.0.
The following example assumes that the release name is kafka: ```console -kubectl delete statefulset kafka-kafka --cascade=false -kubectl delete statefulset kafka-zookeeper --cascade=false +$ kubectl delete statefulset kafka-kafka --cascade=false +$ kubectl delete statefulset kafka-zookeeper --cascade=false ``` ### To 1.0.0 @@ -732,6 +1029,22 @@ Backwards compatibility is not guaranteed unless you modify the labels used on t Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka: ```console -kubectl delete statefulset kafka-kafka --cascade=false -kubectl delete statefulset kafka-zookeeper --cascade=false +$ kubectl delete statefulset kafka-kafka --cascade=false +$ kubectl delete statefulset kafka-zookeeper --cascade=false ``` + +## License + +Copyright © 2022 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/.helmignore b/scripts/helmcharts/databases/charts/kafka/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/Chart.yaml b/scripts/helmcharts/databases/charts/kafka/charts/common/Chart.yaml new file mode 100644 index 000000000..f9ba944c8 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 2.2.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. +home: https://github.com/bitnami/charts/tree/main/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- name: Bitnami + url: https://github.com/bitnami/charts +name: common +sources: +- https://github.com/bitnami/charts +- https://www.bitnami.com/ +type: library +version: 2.2.2 diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/README.md b/scripts/helmcharts/databases/charts/kafka/charts/common/README.md new file mode 100644 index 000000000..ec43a5fab --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/README.md @@ -0,0 +1,351 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. 
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, grouped in different sections.
+
+### Affinities
+
+| Helper identifier               | Description                                          | Expected Input                                 |
+|---------------------------------|------------------------------------------------------|------------------------------------------------|
+| `common.affinities.nodes.soft`  | Return a soft nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.nodes.hard`  | Return a hard nodeAffinity definition                | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pods.soft`   | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+| `common.affinities.pods.hard`   | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $`           |
+| `common.affinities.topologyKey` | Return a topologyKey definition                      | `dict "topologyKey" "FOO"`                     |
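+
+As an illustrative sketch (the `kafka` component name and the surrounding `affinity` block are assumptions for this example, not part of the library), a chart template could consume one of these helpers as follows:
+
+```yaml
+# templates/statefulset.yaml (excerpt)
+affinity:
+  podAntiAffinity: {{- include "common.affinities.pods.soft" (dict "component" "kafka" "context" $) | nindent 4 }}
+```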
+
+### Capabilities
+
+| Helper identifier                              | Description                                                                                     | Expected Input    |
+|------------------------------------------------|-------------------------------------------------------------------------------------------------|-------------------|
+| `common.capabilities.kubeVersion`              | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context |
+| `common.capabilities.cronjob.apiVersion`       | Return the appropriate apiVersion for cronjob.                                                  | `.` Chart context |
+| `common.capabilities.deployment.apiVersion`    | Return the appropriate apiVersion for deployment.                                               | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion`   | Return the appropriate apiVersion for statefulset.                                              | `.` Chart context |
+| `common.capabilities.ingress.apiVersion`       | Return the appropriate apiVersion for ingress.                                                  | `.` Chart context |
+| `common.capabilities.rbac.apiVersion`          | Return the appropriate apiVersion for RBAC resources.                                           | `.` Chart context |
+| `common.capabilities.crd.apiVersion`           | Return the appropriate apiVersion for CRDs.                                                     | `.` Chart context |
+| `common.capabilities.policy.apiVersion`        | Return the appropriate apiVersion for podsecuritypolicy.                                        | `.` Chart context |
+| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy.                                            | `.` Chart context |
+| `common.capabilities.apiService.apiVersion`    | Return the appropriate apiVersion for APIService.                                               | `.` Chart context |
+| `common.capabilities.hpa.apiVersion`           | Return the appropriate apiVersion for Horizontal Pod Autoscaler.                                | `.` Chart context |
+| `common.capabilities.supportsHelmVersion`      | Returns true if the used Helm version is 3.3+.                                                  | `.` Chart context |
+
+### Errors
+
+| Helper identifier                       | Description                                                                                                                                                             | Expected Input                                                                     |
+|-----------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------|
+| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
+
+### Images
+
+| Helper identifier                 | Description                                                                                                    | Expected Input                                                                                          |
+|-----------------------------------|------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------|
+| `common.images.image`             | Return the proper and full image name                                                                          | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
+| `common.images.pullSecrets`       | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global`   |
+| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates)                           | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $`               |
+
+### Ingress
+
+| Helper identifier                         | Description                                                                                                       | Expected Input                                                                                                                                                                   |
+|-------------------------------------------|-----------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.ingress.backend`                  | Generate a proper Ingress backend entry depending on the API version                                             | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+| `common.ingress.supportsPathType`         | Prints "true" if the pathType field is supported                                                                 | `.` Chart context                                                                                                                                                                |
+| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported                                                         | `.` Chart context                                                                                                                                                                |
+| `common.ingress.certManagerRequest`       | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations`                                                                                                                     |
+
+### Labels
+
+| Helper identifier           | Description                                                                 | Expected Input    |
+|-----------------------------|------------------------------------------------------------------------------|-------------------|
+| `common.labels.standard`    | Return Kubernetes standard labels                                           | `.` Chart context |
+| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
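+
+For example (a minimal sketch of a chart template consuming both label helpers):
+
+```yaml
+# templates/service.yaml (excerpt)
+metadata:
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+spec:
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+```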
+
+### Names
+
+| Helper identifier                 | Description                                                            | Expected Input    |
+|-----------------------------------|--------------------------------------------------------------------------|-------------------|
+| `common.names.name`               | Expand the name of the chart or use `.Values.nameOverride`             | `.` Chart context |
+| `common.names.fullname`           | Create a default fully qualified app name.                             | `.` Chart context |
+| `common.names.namespace`          | Allow the release namespace to be overridden                           | `.` Chart context |
+| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace | `.` Chart context |
+| `common.names.chart`              | Chart name plus version                                                | `.` Chart context |
+
+### Secrets
+
+| Helper identifier                 | Description                                                   | Expected Input                                                                                                                                                                                                                    |
+|-----------------------------------|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.secrets.name`             | Generate the name of the secret.                              | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $`, see [ExistingSecret](#existingsecret) for the structure.                                                                   |
+| `common.secrets.key`              | Generate secret key.                                          | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"`, see [ExistingSecret](#existingsecret) for the structure.                                                                                              |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created.  | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists`           | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $`                                                                                                                                                                                         |
+
+### Storage
+
+| Helper identifier             | Description                           | Expected Input                                                                                                        |
+|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------|
+| `common.storage.class`        | Return the proper Storage Class       | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier         | Description                              | Expected Input                                                                                                                                                       |
+|---------------------------|------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value that should be rendered as a template, and context is usually the chart context `$` or `.` |
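+
+For instance (a hedged sketch; `podAnnotations` is an assumed example of a user-supplied value that may itself contain templates):
+
+```yaml
+# templates/deployment.yaml (excerpt)
+metadata:
+  {{- if .Values.podAnnotations }}
+  annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 4 }}
+  {{- end }}
+```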
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.mysql.passwords` | This helper will ensure required password for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mysql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. 
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets (evaluated as templates). + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart used `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_affinities.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..81902a681 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity 
definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_capabilities.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..9d9b76004 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_errors.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_errors.tpl
new file mode 100644
index 000000000..a79cc2e32
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_images.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_images.tpl new file mode 100644 index 000000000..46c659e79 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_ingress.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..831da9caa --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_labels.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_names.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_names.tpl new file mode 100644 index 000000000..617a23489 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_names.tpl @@ -0,0 +1,66 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_secrets.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a1708b2e8 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. 
Already existing 'Secret' resource
+       (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned)
+  2. Password provided via the values.yaml
+       (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned)
+  3. Randomly generated secret password
+       (A new random secret password with the length specified in the 'length' parameter will be generated and returned)
+
+*/}}
+{{- define "common.secrets.passwords.manage" -}}
+
+{{- $password := "" }}
+{{- $subchart := "" }}
+{{- $chartName := default "" .chartName }}
+{{- $passwordLength := default 10 .length }}
+{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }}
+{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }}
+{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }}
+{{- if $secretData }}
+  {{- if hasKey $secretData .key }}
+    {{- $password = index $secretData .key | quote }}
+  {{- else }}
+    {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}}
+  {{- end -}}
+{{- else if $providedPasswordValue }}
+  {{- $password = $providedPasswordValue | toString | b64enc | quote }}
+{{- else }}
+
+  {{- if .context.Values.enabled }}
+    {{- $subchart = $chartName }}
+  {{- end -}}
+
+  {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}}
+  {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}}
+  {{- $passwordValidationErrors := list $requiredPasswordError -}}
+  {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}}
+
+  {{- if .strong }}
+    {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }}
+    {{- $password = randAscii $passwordLength }}
+    {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }}
+    {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }}
+  {{- else }}
+    {{- $password = randAlphaNum $passwordLength | b64enc | quote }}
+  {{- end }}
+{{- end -}}
+{{- printf "%s" $password -}}
+{{- end -}}
+
+{{/*
+Reuses the value from an existing secret, otherwise sets its value to a default value.
+
+Usage:
+{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }}
+
+Params:
+  - secret - String - Required - Name of the 'Secret' resource where the password is stored.
+  - key - String - Required - Name of the key in the secret.
+  - defaultValue - String - Required - The default value to use if the key is not already present in the secret.
+  - context - Context - Required - Parent context.
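+
+Illustrative example (a hedged sketch; the secret name and value path below are hypothetical, not part of the library):
+{{ include "common.secrets.lookup" (dict "secret" "my-release-kafka" "key" "kafka-password" "defaultValue" .Values.auth.password "context" $) }}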
+ +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_storage.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_tplvalues.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_utils.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..b1ead50cf --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . 
-}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_warnings.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_cassandra.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..ded1ae3bc --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mariadb.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..b6906ff77 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mongodb.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..f820ec107 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not.
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mysql.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..74472a061 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_postgresql.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..164ec0d01 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether to evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql.
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_redis.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..dcccfc1ae --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the Redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_validations.tpl b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional.
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional. Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/common/values.yaml b/scripts/helmcharts/databases/charts/kafka/charts/common/values.yaml new file mode 100644 index 000000000..f2df68e5e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes.
+## @skip exampleValue +## +exampleValue: common-chart diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore old mode 100755 new mode 100644 diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.lock b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.lock new file mode 100644 index 000000000..065985261 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 2.2.2 +digest: sha256:49ca75cf23ba5eb7df4becef52580f98c8bd8194eb80368b9d7b875f6eefa8e5 +generated: "2023-01-06T05:12:14.420203052Z" diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml old mode 100755 new mode 100644 index c3b15dc5c..ae6fd7e1f --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml @@ -1,20 +1,26 @@ annotations: category: Infrastructure -apiVersion: v1 -appVersion: 3.6.2 -description: A centralized service for maintaining configuration information, naming, - providing distributed synchronization, and providing group services for distributed - applications. -engine: gotpl -home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper -icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-110x117.png + licenses: | + - Apache-2.0 +apiVersion: v2 +appVersion: 3.8.0 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: Apache ZooKeeper provides a reliable, centralized register of configuration + data and services for distributed applications. +home: https://github.com/bitnami/charts/tree/main/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-220x234.png keywords: - zookeeper maintainers: -- email: containers@bitnami.com - name: Bitnami +- name: Bitnami + url: https://github.com/bitnami/charts name: zookeeper sources: -- https://github.com/bitnami/bitnami-docker-zookeeper +- https://github.com/bitnami/containers/tree/main/bitnami/zookeeper - https://zookeeper.apache.org/ -version: 5.21.9 +version: 11.1.0 diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md old mode 100755 new mode 100644 index 0291875ed..7a1c17ffb --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md @@ -1,24 +1,30 @@ -# ZooKeeper + -[ZooKeeper](https://zookeeper.apache.org/) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. All of these kinds of services are used in some form or other by distributed applications. +# Apache ZooKeeper packaged by Bitnami +Apache ZooKeeper provides a reliable, centralized register of configuration data and services for distributed applications. + +[Overview of Apache ZooKeeper](https://zookeeper.apache.org) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. 
+ ## TL;DR ```console -$ helm repo add bitnami https://charts.bitnami.com/bitnami -$ helm install my-release bitnami/zookeeper +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/zookeeper ``` ## Introduction -This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) deployment on a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. -Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. ## Prerequisites -- Kubernetes 1.12+ -- Helm 2.12+ or Helm 3.0-beta3+ +- Kubernetes 1.19+ +- Helm 3.2.0+ - PV provisioner support in the underlying infrastructure ## Installing the Chart @@ -26,8 +32,8 @@ Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment To install the chart with the release name `my-release`: ```console -$ helm repo add bitnami https://charts.bitnami.com/bitnami -$ helm install my-release bitnami/zookeeper +$ helm repo add my-repo https://charts.bitnami.com/bitnami +$ helm install my-release my-repo/zookeeper ``` These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
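+Parameters can be overridden at install time using `--set key=value`. For example (an illustrative override; `replicaCount` and `auth.client.enabled` are parameters documented in the tables below, and any other parameter can be set the same way):
+
+```console
+$ helm install my-release my-repo/zookeeper \
+  --set replicaCount=3 \
+  --set auth.client.enabled=true
+```
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart with `-f my-values.yaml`.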
@@ -46,206 +52,358 @@ The command removes all the Kubernetes components associated with the chart and ## Parameters -The following tables lists the configurable parameters of the ZooKeeper chart and their default values per section/component: +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `global.imageRegistry` | Global Docker image registry | `nil` | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | -| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | ### Common parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `nameOverride` | String to partially override zookeeper.fullname | `nil` | -| `fullnameOverride` | String to fully override zookeeper.fullname | `nil` | -| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | -| `commonLabels` | Labels to add to all deployed objects | `{}` | -| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | -| `schedulerName` | Kubernetes pod scheduler registry | `nil` (use the default-scheduler) | +| Name | Description | Value | +| ------------------------ | -------------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname template (will maintain the release name) | `""` | +| `fullnameOverride` | String to fully override common.names.fullname template | `""` | +| `clusterDomain` | Kubernetes Cluster Domain | `cluster.local` | +| `extraDeploy` | Extra objects to deploy (evaluated as a template) | `[]` | +| `commonLabels` | Add labels to all the deployed resources | `{}` | +| `commonAnnotations` | Add annotations to all the deployed resources | `{}` | +| `namespaceOverride` | Override namespace for ZooKeeper resources | `""` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the statefulset | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the statefulset | `["infinity"]` | -### Zookeeper chart parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `image.registry` | ZooKeeper image registry | `docker.io` | -| `image.repository` | ZooKeeper Image name | 
`bitnami/zookeeper` | -| `image.tag` | ZooKeeper Image tag | `{TAG_NAME}` | -| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | -| `image.debug` | Specify if debug values should be set | `false` | -| `tickTime` | Basic time unit in milliseconds used by ZooKeeper for heartbeats | `2000` | -| `initLimit` | Time the ZooKeeper servers in quorum have to connect to a leader | `10` | -| `syncLimit` | How far out of date a server can be from a leader | `5` | -| `maxClientCnxns` | Number of concurrent connections that a single client may make to a single member | `60` | -| `maxSessionTimeout` | Maximum session timeout in milliseconds that the server will allow the client to negotiate. | `40000` | -| `autopurge.snapRetainCount` | Number of retains snapshots for autopurge | `3` | -| `autopurge.purgeInterval` | The time interval in hours for which the purge task has to be triggered | `0` | -| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands to use | `srvr, mntr` | -| `listenOnAllIPs` | Allow Zookeeper to listen for connections from its peers on all available IP addresses. | `false` | -| `allowAnonymousLogin` | Allow to accept connections from unauthenticated users | `yes` | -| `auth.existingSecret` | Use existing secret (ignores previous password) | `nil` | -| `auth.enabled` | Enable ZooKeeper auth | `false` | -| `auth.clientUser` | User that will use ZooKeeper clients to auth | `nil` | -| `auth.clientPassword` | Password that will use ZooKeeper clients to auth | `nil` | -| `auth.serverUsers` | List of user to be created | `nil` | -| `auth.serverPasswords` | List of passwords to assign to users when created | `nil` | -| `heapSize` | Size in MB for the Java Heap options (Xmx and XMs) | `[]` | -| `logLevel` | Log level of ZooKeeper server | `ERROR` | -| `jvmFlags` | Default JVMFLAGS for the ZooKeeper process | `nil` | -| `config` | Configure ZooKeeper with a custom zoo.conf file | `nil` | -| `dataLogDir` | Data log directory | `""` | +### ZooKeeper chart parameters + +| Name | Description | Value | +| ----------------------------- | -------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper image repository | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper image tag (immutable tags are recommended) | `3.8.0-debian-11-r74` | +| `image.digest` | ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug values should be set | `false` | +| `auth.client.enabled` | Enable ZooKeeper client-server authentication. 
It uses SASL/Digest-MD5 | `false` | +| `auth.client.clientUser` | User that will use ZooKeeper clients to auth | `""` | +| `auth.client.clientPassword` | Password that will use ZooKeeper clients to auth | `""` | +| `auth.client.serverUsers` | Comma, semicolon or whitespace separated list of users to be created | `""` | +| `auth.client.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.client.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `auth.quorum.enabled` | Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 | `false` | +| `auth.quorum.learnerUser` | User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.learnerPassword` | Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. | `""` | +| `auth.quorum.serverUsers` | Comma, semicolon or whitespace separated list of users for the quorumServers. | `""` | +| `auth.quorum.serverPasswords` | Comma, semicolon or whitespace separated list of passwords to assign to users when created | `""` | +| `auth.quorum.existingSecret` | Use existing secret (ignores previous passwords) | `""` | +| `tickTime` | Basic time unit (in milliseconds) used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | Limits the length of time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `preAllocSize` | Block size for transaction log file | `65536` | +| `snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) | `100000` | +| `maxClientCnxns` | Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble | `60` | +| `maxSessionTimeout` | Maximum session timeout (in milliseconds) that the server will allow the client to negotiate | `40000` | +| `heapSize` | Size (in MB) for the Java Heap options (Xmx and Xms) | `1024` | +| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands that can be executed | `srvr, mntr, ruok` | +| `minServerId` | Minimal SERVER_ID value, nodes increment their IDs respectively | `1` | +| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` | +| `autopurge.snapRetainCount` | The most recent snapshots amount (and corresponding transaction logs) to retain | `3` | +| `autopurge.purgeInterval` | The time interval (in hours) for which the purge task has to be triggered | `0` | +| `logLevel` | Log level for the ZooKeeper server.
ERROR by default | `ERROR` | +| `jvmFlags` | Default JVM flags for the ZooKeeper process | `""` | +| `dataLogDir` | Dedicated data log directory | `""` | +| `configuration` | Configure ZooKeeper with a custom zoo.cfg file | `""` | +| `existingConfigmap` | The name of an existing ConfigMap with your custom configuration for ZooKeeper | `""` | +| `extraEnvVars` | Array with extra environment variables to add to ZooKeeper nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ZooKeeper nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ZooKeeper nodes | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | + ### Statefulset parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `replicaCount` | Number of ZooKeeper nodes | `1` | -| `updateStrategy` | Update strategy for the statefulset | `RollingUpdate` | -| `rollingUpdatePartition` | Partition update strategy | `nil` | -| `podManagementPolicy` | Pod management policy | `Parallel` | -| `podLabels` | ZooKeeper pod labels | `{}` (evaluated as a template) | -| `podAnnotations` | ZooKeeper Pod annotations | `{}` (evaluated as a template) | -| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) | -| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) | -| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) | -| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods | `""` | -| `securityContext.enabled` | Enable security context (ZooKeeper master pod) | `true` | -| `securityContext.fsGroup` | Group ID for the container (ZooKeeper master pod) | `1001` | -| `securityContext.runAsUser` | User ID for the container (ZooKeeper master pod) | `1001` | -| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | -| `livenessProbe` | Liveness probe configuration for ZooKeeper | Check `values.yaml` file | -| `readinessProbe` | Readiness probe configuration for ZooKeeper | Check `values.yaml` file | -| `extraVolumes` | Extra volumes | `nil` | -| `extraVolumeMounts` | Mount extra volume(s) | `nil` | -| `podDisruptionBudget.maxUnavailable` | Max number of pods down simultaneously | `1` | +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------- | +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `containerPorts.client` | ZooKeeper client container port | `2181` | +| `containerPorts.tls` | ZooKeeper TLS container port | `3181` | +| `containerPorts.follower` | ZooKeeper follower container port | `2888` | +| `containerPorts.election` | ZooKeeper election container port | `3888` | +| `livenessProbe.enabled` | Enable livenessProbe on ZooKeeper containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `30` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe 
| `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `livenessProbe.probeCommandTimeout` | Probe command timeout for livenessProbe | `2` | +| `readinessProbe.enabled` | Enable readinessProbe on ZooKeeper containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `readinessProbe.probeCommandTimeout` | Probe command timeout for readinessProbe | `2` | +| `startupProbe.enabled` | Enable startupProbe on ZooKeeper containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `30` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `15` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `lifecycleHooks` | Lifecycle hooks for the ZooKeeper container(s) to automate configuration before or after startup | `{}` | +| `resources.limits` | The resources limits for the ZooKeeper containers | `{}` | +| `resources.requests.memory` | The requested memory for the ZooKeeper containers | `256Mi` | +| `resources.requests.cpu` | The requested cpu for the ZooKeeper containers | `250m` | +| `podSecurityContext.enabled` | Enabled ZooKeeper pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set ZooKeeper pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enabled ZooKeeper containers' Security Context | `true` | +| `containerSecurityContext.runAsUser` | Set ZooKeeper containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set ZooKeeper containers' Security Context runAsNonRoot | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Force the child process to be run as non-privileged | `false` | +| `hostAliases` | ZooKeeper pods host aliases | `[]` | +| `podLabels` | Extra labels for ZooKeeper pods | `{}` | +| `podAnnotations` | Annotations for ZooKeeper pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set. | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set.
| `[]` | +| `affinity` | Affinity for pod assignment | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Tolerations for pod assignment | `[]` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `podManagementPolicy` | The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` | `Parallel` | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand | `""` | +| `schedulerName` | Kubernetes pod scheduler registry | `""` | +| `updateStrategy.type` | ZooKeeper statefulset strategy type | `RollingUpdate` | +| `updateStrategy.rollingUpdate` | ZooKeeper statefulset rolling update configuration parameters | `{}` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ZooKeeper pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ZooKeeper pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ZooKeeper pod(s) | `[]` | +| `pdb.create` | Deploy a pdb object for the ZooKeeper pod | `false` | +| `pdb.minAvailable` | Minimum available ZooKeeper replicas | `""` | +| `pdb.maxUnavailable` | Maximum unavailable ZooKeeper replicas | `1` | -### Exposure parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `service.type` | Kubernetes Service type | `ClusterIP` | -| `service.port` | ZooKeeper port | `2181` | -| `service.followerPort` | ZooKeeper follower port | `2888` | -| `service.electionPort` | ZooKeeper election port | `3888` | -| `service.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | -| `serviceAccount.create` | Enable creation of ServiceAccount for zookeeper pod | `false` | -| `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | Generated using the `zookeeper.fullname` template | -| `service.tls.client_enable` | Enable tls for client connections | `false` | -| `service.tls.quorum_enable` | Enable tls for quorum protocol | `false` | -| `service.tls.disable_base_client_port` | Remove client port from service definitions. | `false` | -| `service.tls.client_port` | Service port for tls client connections | `3181` | -| `service.tls.client_keystore_path` | KeyStore file path.
Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` | -| `service.tls.quorum_keystore_password` | KeyStore password. You can use environment variables. | `nil` | -| `service.tls.quorum_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` | -| `service.tls.quorum_truststore_password` | TrustStore password. You can use environment variables. | `nil` | -| `service.annotations` | Annotations for the Service | `{}` | -| `service.headless.annotations` | Annotations for the Headless Service | `{}` | -| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +### Traffic Exposure parameters + +| Name | Description | Value | +| ------------------------------------------- | --------------------------------------------------------------------------------------- | ----------- | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.ports.client` | ZooKeeper client service port | `2181` | +| `service.ports.tls` | ZooKeeper TLS service port | `3181` | +| `service.ports.follower` | ZooKeeper follower service port | `2888` | +| `service.ports.election` | ZooKeeper election service port | `3888` | +| `service.nodePorts.client` | Node port for clients | `""` | +| `service.nodePorts.tls` | Node port for TLS | `""` | +| `service.disableBaseClientPort` | Remove client port from service definitions. | `false` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | ZooKeeper service Cluster IP | `""` | +| `service.loadBalancerIP` | ZooKeeper service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | ZooKeeper service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | ZooKeeper service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for ZooKeeper service | `{}` | +| `service.extraPorts` | Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) | `[]` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `service.headless.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `service.headless.servicenameOverride` | String to partially override headless service name | `""` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------------- | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for ZooKeeper pod | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + ### Persistence parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `persistence.enabled` | Enable Zookeeper data persistence using PVC | `true` | -| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` (evaluated as a template) | -| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `nil` | -| `persistence.accessMode` | PVC Access Mode for ZooKeeper data volume | `ReadWriteOnce` | -| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | -| `persistence.annotations` | Annotations for the PVC | `{}` (evaluated as a template) | -| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's Data log directory | `8Gi` | -| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for Zookeeper's Data log directory | `nil` (evaluated as a template) | +| Name | Description | Value | +| -------------------------------------- | ------------------------------------------------------------------------------ | ------------------- | +| `persistence.enabled` | Enable ZooKeeper data persistence using PVC. If false, use emptyDir | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use (only when deploying a single replica) | `""` | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `""` | +| `persistence.accessModes` | PVC Access modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `persistence.labels` | Labels for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data PVC | `{}` | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's dedicated data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `""` | +| `persistence.dataLogDir.selector` | Selector to match an existing Persistent Volume for ZooKeeper's data log PVC | `{}` | + ### Volume Permissions parameters -| Parameter | Description | Default | -|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | -| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | -| `volumePermissions.resources` | Init container resource requests/limit | 
`nil` |
+| Name                                                    | Description                                                                                                                     | Value                   |
+| ------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------- | ----------------------- |
+| `volumePermissions.enabled`                              | Enable init container that changes the owner and group of the persistent volume                                                  | `false`                 |
+| `volumePermissions.image.registry`                       | Init container volume-permissions image registry                                                                                 | `docker.io`             |
+| `volumePermissions.image.repository`                     | Init container volume-permissions image repository                                                                               | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag`                            | Init container volume-permissions image tag (immutable tags are recommended)                                                     | `11-debian-11-r69`      |
+| `volumePermissions.image.digest`                         | Init container volume-permissions image digest in the format sha256:aa.... Note that this parameter, if set, overrides the tag   | `""`                    |
+| `volumePermissions.image.pullPolicy`                     | Init container volume-permissions image pull policy                                                                              | `IfNotPresent`          |
+| `volumePermissions.image.pullSecrets`                    | Init container volume-permissions image pull secrets                                                                             | `[]`                    |
+| `volumePermissions.resources.limits`                     | Init container volume-permissions resource limits                                                                                | `{}`                    |
+| `volumePermissions.resources.requests`                   | Init container volume-permissions resource requests                                                                              | `{}`                    |
+| `volumePermissions.containerSecurityContext.enabled`     | Enable init container Security Context                                                                                           | `true`                  |
+| `volumePermissions.containerSecurityContext.runAsUser`   | User ID for the init container                                                                                                   | `0`                     |
+
 ### Metrics parameters
 
-| Parameter                                  | Description                                                                                                       | Default                                                      |
-|--------------------------------------------|-------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------|
-| `metrics.enabled`                          | Enable prometheus to access zookeeper metrics endpoint                                                            | `false`                                                      |
-| `metrics.containerPort`                    | Port where a Jetty server will expose Prometheus metrics                                                          | `9141`                                                       |
-| `metrics.service.type`                     | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Jetty server exposing Prometheus metrics  | `ClusterIP`                                                  |
-| `metrics.service.port`                     | Prometheus metrics service port                                                                                   | `9141`                                                       |
-| `metrics.service.annotations`              | Service annotations for Prometheus to auto-discover the metrics endpoint                                          | `{prometheus.io/scrape: "true", prometheus.io/port: "9141"}` |
-| `metrics.serviceMonitor.enabled`           | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)            | `false`                                                      |
-| `metrics.serviceMonitor.namespace`         | Namespace for the ServiceMonitor Resource                                                                         | The Release Namespace                                        |
-| `metrics.serviceMonitor.interval`          | Interval at which metrics should be scraped.
| `nil` (Prometheus Operator default value) | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `nil` | -| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | -| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource | The Release Namespace | -| `metrics.prometheusRule.selector` | Prometheus instance selector labels | `nil` | -| `metrics.prometheusRule.rules` | Prometheus Rule definitions (see values.yaml for examples) | `[]` | +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------- | ----------- | +| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` | +| `metrics.containerPort` | ZooKeeper Prometheus Exporter container port | `9141` | +| `metrics.service.type` | ZooKeeper Prometheus Exporter service type | `ClusterIP` | +| `metrics.service.port` | ZooKeeper Prometheus Exporter service port | `9141` | +| `metrics.service.annotations` | Annotations for Prometheus to auto-discover the metrics endpoint | `{}` | +| `metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + + +### TLS/SSL parameters + +| Name | Description | Value | +| ----------------------------------------- | -------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------- | +| `tls.client.enabled` | Enable TLS for client connections | `false` | +| `tls.client.auth` | SSL Client auth. Can be "none", "want" or "need". 
| `none` | +| `tls.client.autoGenerated` | Generate automatically self-signed TLS certificates for ZooKeeper client communications | `false` | +| `tls.client.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper client communications | `""` | +| `tls.client.existingSecretKeystoreKey` | The secret key from the tls.client.existingSecret containing the Keystore. | `""` | +| `tls.client.existingSecretTruststoreKey` | The secret key from the tls.client.existingSecret containing the Truststore. | `""` | +| `tls.client.keystorePath` | Location of the KeyStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks` | +| `tls.client.truststorePath` | Location of the TrustStore file used for Client connections | `/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks` | +| `tls.client.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.client.passwordsSecretKeystoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. | `""` | +| `tls.client.passwordsSecretTruststoreKey` | The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. | `""` | +| `tls.client.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.client.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.quorum.enabled` | Enable TLS for quorum protocol | `false` | +| `tls.quorum.auth` | SSL Quorum Client auth. Can be "none", "want" or "need". | `none` | +| `tls.quorum.autoGenerated` | Create self-signed TLS certificates. Currently only supports PEM certificates. | `false` | +| `tls.quorum.existingSecret` | Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol | `""` | +| `tls.quorum.existingSecretKeystoreKey` | The secret key from the tls.quorum.existingSecret containing the Keystore. | `""` | +| `tls.quorum.existingSecretTruststoreKey` | The secret key from the tls.quorum.existingSecret containing the Truststore. | `""` | +| `tls.quorum.keystorePath` | Location of the KeyStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks` | +| `tls.quorum.truststorePath` | Location of the TrustStore file used for Quorum protocol | `/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks` | +| `tls.quorum.passwordsSecretName` | Existing secret containing Keystore and truststore passwords | `""` | +| `tls.quorum.passwordsSecretKeystoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. | `""` | +| `tls.quorum.passwordsSecretTruststoreKey` | The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. | `""` | +| `tls.quorum.keystorePassword` | Password to access KeyStore if needed | `""` | +| `tls.quorum.truststorePassword` | Password to access TrustStore if needed | `""` | +| `tls.resources.limits` | The resources limits for the TLS init container | `{}` | +| `tls.resources.requests` | The requested resources for the TLS init container | `{}` | + Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, ```console $ helm install my-release \ --set auth.clientUser=newUser \ - bitnami/zookeeper + my-repo/zookeeper ``` The above command sets the ZooKeeper user to `newUser`. 
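+For instance, the following sketch combines several of the parameters documented in the tables above to deploy a three-node ensemble with auto-generated quorum TLS certificates; `my-release` and `my-repo` are placeholder names, not values shipped with the chart:
+
+```console
+$ helm install my-release \
+  --set replicaCount=3 \
+  --set tls.quorum.enabled=true \
+  --set tls.quorum.autoGenerated=true \
+  my-repo/zookeeper
+```
+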
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, ```console -$ helm install my-release -f values.yaml bitnami/zookeeper +$ helm install my-release -f values.yaml my-repo/zookeeper ``` > **Tip**: You can use the default [values.yaml](values.yaml) ## Configuration and installation details -### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) +### [Rolling vs Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. -### Production configuration +### Configure log level -This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. +You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable or the parameter `logLevel`. By default, it is set to `ERROR` because each use of the liveness probe and the readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection, generating a high volume of noise in your logs. -- Number of ZooKeeper nodes: +In order to remove that log noise so levels can be set to 'INFO', two changes must be made. -```diff -- replicaCount: 1 -+ replicaCount: 3 +First, ensure that you are not getting metrics via the deprecated pattern of polling 'mntr' on the ZooKeeper client port. The preferred method of polling for Apache ZooKeeper metrics is the ZooKeeper metrics server. This is supported in this chart when setting `metrics.enabled` to `true`. + +Second, to avoid the connection/disconnection messages from the probes, you can set custom values for these checks which direct them to the ZooKeeper Admin Server instead of the client port. By default, an Admin Server will be started that listens on `localhost` at port `8080`. 
The following is an example of this use of the Admin Server for probes:
+
+```yaml
+livenessProbe:
+  enabled: false
+readinessProbe:
+  enabled: false
+customLivenessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep ruok']
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+customReadinessProbe:
+  exec:
+    command: ['/bin/bash', '-c', 'curl -s -m 2 http://localhost:8080/commands/ruok | grep error | grep null']
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  successThreshold: 1
+  failureThreshold: 6
+```
 
-- Enable prometheus metrics:
-
-```diff
-- metrics.enabled: false
-+ metrics.enabled: true
-```
 
+You can also set the log4j logging level and which log appenders are turned on by using `ZOO_LOG4J_PROP`, which is set inside `conf/log4j.properties` as `zookeeper.root.logger` and defaults to
+
+```console
+zookeeper.root.logger=INFO, CONSOLE
+```
+
+The available appenders are:
 
-### Log level
-
-You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable. By default, it is set to `ERROR` because of each readiness probe produce an `INFO` message on connection and a `WARN` message on disconnection.
+- CONSOLE
+- ROLLINGFILE
+- RFAAUDIT
+- TRACEFILE
 
 ## Persistence
 
-The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+The [Bitnami ZooKeeper](https://github.com/bitnami/containers/tree/main/bitnami/zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
 
-Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
-See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If you encounter errors when working with persistent volumes, refer to our [troubleshooting guide for persistent volumes](https://docs.bitnami.com/kubernetes/faq/troubleshooting/troubleshooting-persistence-volumes/).
 
 ### Adjust permissions of persistent volume mountpoint
 
@@ -256,14 +414,76 @@ As an alternative, this chart supports using an initContainer to change the owne
 
 You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
 
-### Data Log Directory
+### Configure the data log directory
 
-You can use a dedicated device for logs (instead of using the data directory) to help avoiding competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string an it result in the log being written to the data directory (Zookeeper's default behavior).
+You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string and it will result in the log being written to the data directory (Zookeeper's default behavior).
 
 When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
+### Set pod affinity
+
+This chart allows you to set custom pod affinity using the `affinity` parameter. Find more information about pod affinity in the [Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+
+As an alternative, you can use any of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
 ## Upgrading
 
+### To 11.0.0
+
+This major version removes `commonAnnotations` and `commonLabels` from `volumeClaimTemplates`. Now annotations and labels can be set in volume claims using the `persistence.annotations` and `persistence.labels` values. If the previous deployment has already set `commonAnnotations` and/or `commonLabels` values, to ensure a clean upgrade from the previous version without losing data, please set `persistence.annotations` and/or `persistence.labels` values with the same content as the common values.
+
+### To 10.0.0
+
+This new version of the chart adds support for server-server authentication.
+The chart previously supported client-server authentication; to avoid confusion, the previous parameters have been renamed from `auth.*` to `auth.client.*`.
+
+### To 9.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.8.0. Upgrade compatibility is not guaranteed.
+
+### To 8.0.0
+
+This major release renames several values in this chart and adds missing features, in order to be in line with the rest of the assets in the Bitnami charts repository.
+
+Affected values:
+
+- `allowAnonymousLogin` is deprecated.
+- `containerPort`, `tlsContainerPort`, `followerContainerPort` and `electionContainerPort` have been regrouped under the `containerPorts` map.
+- `service.port`, `service.tlsClientPort`, `service.followerPort`, and `service.electionPort` have been regrouped under the `service.ports` map.
+- `updateStrategy` (string) and `rollingUpdatePartition` are regrouped under the `updateStrategy` map.
+- `podDisruptionBudget.*` parameters are renamed to `pdb.*`.
+
+### To 7.0.0
+
+This new version renames the parameters used to configure TLS for both client and quorum.
+
+- `service.tls.disable_base_client_port` is renamed to `service.disableBaseClientPort`
+- `service.tls.client_port` is renamed to `service.tlsClientPort`
+- `service.tls.client_enable` is renamed to `tls.client.enabled`
+- `service.tls.client_keystore_path` is renamed to `tls.client.keystorePath`
+- `service.tls.client_truststore_path` is renamed to `tls.client.truststorePath`
+- `service.tls.client_keystore_password` is renamed to `tls.client.keystorePassword`
+- `service.tls.client_truststore_password` is renamed to `tls.client.truststorePassword`
+- `service.tls.quorum_enable` is renamed to `tls.quorum.enabled`
+- `service.tls.quorum_keystore_path` is renamed to `tls.quorum.keystorePath`
+- `service.tls.quorum_truststore_path` is renamed to `tls.quorum.truststorePath`
+- `service.tls.quorum_keystore_password` is renamed to `tls.quorum.keystorePassword`
+- `service.tls.quorum_truststore_password` is renamed to `tls.quorum.truststorePassword`
+
+### To 6.1.0
+
+This version introduces `bitnami/common`, a [library chart](https://helm.sh/docs/topics/library_charts/#helm), as a dependency. More documentation about this new utility can be found [here](https://github.com/bitnami/charts/tree/main/bitnami/common#bitnami-common-library-chart). Please make sure that you have updated the chart dependencies before executing any upgrade.
+
+### To 6.0.0
+
+[On November 13, 2020, Helm v2 support was formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+[Learn more about this change and related upgrade considerations](https://docs.bitnami.com/kubernetes/infrastructure/zookeeper/administration/upgrade-helm3/).
+
 ### To 5.21.0
 
 A couple of parameters related to Zookeeper metrics were renamed or disappeared in favor of new ones:
@@ -295,3 +515,19 @@ Use the workaround below to upgrade from versions previous to 1.0.0. The followi
 
 ```console
 $ kubectl delete statefulset zookeeper-zookeeper --cascade=false
 ```
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
\ No newline at end of file
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/.helmignore b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/Chart.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/Chart.yaml
new file mode 100644
index 000000000..f9ba944c8
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 2.2.2
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/main/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- name: Bitnami
+  url: https://github.com/bitnami/charts
+name: common
+sources:
+- https://github.com/bitnami/charts
+- https://www.bitnami.com/
+type: library
+version: 2.2.2
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/README.md b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/README.md
new file mode 100644
index 000000000..ec43a5fab
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/README.md
@@ -0,0 +1,351 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 1.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters.
+
+## Prerequisites
+
+- Kubernetes 1.19+
+- Helm 3.2.0+
+
+## Parameters
+
+The following tables list the helpers available in the library, scoped by section.
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.nodes.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.nodes.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pods.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pods.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.topologyKey` | Return a topologyKey definition | `dict "topologyKey" "FOO"` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.cronjob.apiVersion` | Return the appropriate apiVersion for cronjob. | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.policy.apiVersion` | Return the appropriate apiVersion for podsecuritypolicy. | `.` Chart context | +| `common.capabilities.networkPolicy.apiVersion` | Return the appropriate apiVersion for networkpolicy. | `.` Chart context | +| `common.capabilities.apiService.apiVersion` | Return the appropriate apiVersion for APIService. | `.` Chart context | +| `common.capabilities.hpa.apiVersion` | Return the appropriate apiVersion for Horizontal Pod Autoscaler | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | +| `common.images.renderPullSecrets` | Return the proper Docker Image Registry Secret Names (evaluates values as templates) | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | +| `common.ingress.supportsPathType` | Prints "true" if the pathType field is supported | `.` Chart context | +| `common.ingress.supportsIngressClassname` | Prints "true" if the ingressClassname field is supported | `.` Chart context | +| `common.ingress.certManagerRequest` | Prints "true" if required cert-manager annotations for TLS signed certificates are set in the Ingress annotations | `dict "annotations" .Values.path.to.the.ingress.annotations` | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|-----------------------------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-----------------------------------|-----------------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. 
| `.` Chart context |
+| `common.names.namespace`          | Allow the release namespace to be overridden                           | `.` Chart context |
+| `common.names.fullname.namespace` | Create a fully qualified app name adding the installation's namespace  | `.` Chart context |
+| `common.names.chart`              | Chart name plus version                                                | `.` Chart context |
+
+### Secrets
+
+| Helper identifier                 | Description                                                   | Expected Input |
+|-----------------------------------|---------------------------------------------------------------|----------------|
+| `common.secrets.name`             | Generate the name of the secret.                              | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key`              | Generate secret key.                                          | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.passwords.manage` | Generate secret password or retrieve one if already created.  | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`; the length, strong and chartName fields are optional. |
+| `common.secrets.exists`           | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier      | Description                      | Expected Input |
+|------------------------|----------------------------------|----------------|
+| `common.storage.class` | Return the proper Storage Class  | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier         | Description                            | Expected Input |
+|---------------------------|----------------------------------------|----------------|
+| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`; value is the value that should be rendered as a template, and context is usually the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier              | Description                                     | Expected Input |
+|--------------------------------|-------------------------------------------------|----------------|
+| `common.utils.fieldToEnvVar`   | Build environment variable name given a field.  | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path                                               | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList`  | Returns the first `.Values` key with a defined value, or the first key of the list if none are defined  | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
+
+### Validations
+
+| Helper identifier                                | Description | Expected Input |
+|--------------------------------------------------|-------------|----------------|
+| `common.validations.values.single.empty`         | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how-to-get instruction. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty`       | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords`    | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where you use the mariadb chart and the helper. |
+| `common.validations.values.mysql.passwords`      | This helper will ensure the required passwords for MySQL are not empty. It returns a shared error for all the values. | `dict "secret" "mysql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where you use the mysql chart and the helper. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where you use the postgresql chart and the helper. |
+| `common.validations.values.redis.passwords`      | This helper will ensure the required passwords for Redis® are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where you use the redis chart and the helper. |
+| `common.validations.values.cassandra.passwords`  | This helper will ensure the required passwords for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where you use the cassandra chart and the helper. |
+| `common.validations.values.mongodb.passwords`    | This helper will ensure the required passwords for MongoDB® are not empty. It returns a shared error for all the values.
| `dict "secret" "mongodb-secret" "subchart" "true" "context" $`; the subchart field is optional and can be true or false, depending on where you use the mongodb chart and the helper. |
+
+### Warnings
+
+| Helper identifier            | Description                        | Expected Input |
+|------------------------------|------------------------------------|----------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if the image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets (evaluated as templates).
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information on logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName is set to "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their own existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+    'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value:
+
+        export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 -d)
+
+    'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value:
+
+        export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 -d)
+```
+
+## Upgrading
+
+### To 1.0.0
+
+[On November 13, 2020, Helm v2 support was formally ended](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3); this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts.
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues.
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore.
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3.
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+## License
+
+Copyright © 2022 Bitnami
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..81902a681 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_affinities.tpl @@ -0,0 +1,106 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . | quote }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a topologyKey definition +{{ include "common.affinities.topologyKey" (dict "topologyKey" "BAR") -}} +*/}} +{{- define "common.affinities.topologyKey" -}} +{{ .topologyKey | default "kubernetes.io/hostname" -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "extraMatchLabels" .Values.extraMatchLabels "topologyKey" "BAR" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +{{- $extraMatchLabels := default (dict) .extraMatchLabels -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + {{- range $key, $value := $extraMatchLabels }} + {{ $key }}: {{ $value | quote }} + {{- end }} + topologyKey: {{ include "common.affinities.topologyKey" (dict "topologyKey" .topologyKey) }} +{{- 
end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..9d9b76004 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_capabilities.tpl @@ -0,0 +1,154 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "common.capabilities.policy.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "policy/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "common.capabilities.networkPolicy.apiVersion" -}} +{{- if semverCompare "<1.7-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for cronjob. +*/}} +{{- define "common.capabilities.cronjob.apiVersion" -}} +{{- if semverCompare "<1.21-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "batch/v1beta1" -}} +{{- else -}} +{{- print "batch/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for APIService.
+*/}}
+{{- define "common.capabilities.apiService.apiVersion" -}}
+{{- if semverCompare "<1.10-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiregistration.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiregistration.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for Horizontal Pod Autoscaler.
+*/}}
+{{- define "common.capabilities.hpa.apiVersion" -}}
+{{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .context) -}}
+{{- if .beta2 -}}
+{{- print "autoscaling/v2beta2" -}}
+{{- else -}}
+{{- print "autoscaling/v2beta1" -}}
+{{- end -}}
+{{- else -}}
+{{- print "autoscaling/v2" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
new file mode 100644
index 000000000..a79cc2e32
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." -}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl new file mode 100644 index 000000000..46c659e79 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_images.tpl @@ -0,0 +1,76 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $separator := ":" -}} +{{- $termination := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if .imageRoot.digest }} + {{- $separator = "@" -}} + {{- $termination = .imageRoot.digest | toString -}} +{{- end -}} +{{- printf "%s/%s%s%s" $registryName $repositoryName $separator $termination -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names (deprecated: use common.images.renderPullSecrets instead) +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names evaluating values as templates +{{ include "common.images.renderPullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "context" $) }} +*/}} +{{- define "common.images.renderPullSecrets" -}} + {{- $pullSecrets := list }} + {{- $context := .context }} + + {{- if $context.Values.global }} + {{- range $context.Values.global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . "context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets (include "common.tplvalues.render" (dict "value" . 
"context" $context)) -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..831da9caa --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_ingress.tpl @@ -0,0 +1,68 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if or (typeIs "int" .servicePort) (typeIs "float64" .servicePort) }} + number: {{ .servicePort | int }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the ingressClassname field is supported +Usage: +{{ include "common.ingress.supportsIngressClassname" . }} +*/}} +{{- define "common.ingress.supportsIngressClassname" -}} +{{- if semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if cert-manager required annotations for TLS signed +certificates are set in the Ingress annotations +Ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations +Usage: +{{ include "common.ingress.certManagerRequest" ( dict "annotations" .Values.path.to.the.ingress.annotations ) }} +*/}} +{{- define "common.ingress.certManagerRequest" -}} +{{ if or (hasKey .annotations "cert-manager.io/cluster-issuer") (hasKey .annotations "cert-manager.io/issuer") (hasKey .annotations "kubernetes.io/tls-acme") }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl new file mode 100644 index 000000000..617a23489 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_names.tpl @@ -0,0 +1,66 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a default fully qualified dependency name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +Usage: +{{ include "common.names.dependency.fullname" (dict "chartName" "dependency-chart-name" "chartValues" .Values.dependency-chart "context" $) }} +*/}} +{{- define "common.names.dependency.fullname" -}} +{{- if .chartValues.fullnameOverride -}} +{{- .chartValues.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .chartName .chartValues.nameOverride -}} +{{- if contains $name .context.Release.Name -}} +{{- .context.Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .context.Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts. +*/}} +{{- define "common.names.namespace" -}} +{{- default .Release.Namespace .Values.namespaceOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a fully qualified app name adding the installation's namespace. +*/}} +{{- define "common.names.fullname.namespace" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) (include "common.names.namespace" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..a1708b2e8 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_secrets.tpl @@ -0,0 +1,165 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/main/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. + +The order in which this function returns a secret password: + 1. 
Already existing 'Secret' resource + (If a 'Secret' resource is found under the name provided to the 'secret' parameter to this function and that 'Secret' resource contains a key with the name passed as the 'key' parameter to this function then the value of this existing secret password will be returned) + 2. Password provided via the values.yaml + (If one of the keys passed to the 'providedValues' parameter to this function is a valid path to a key in the values.yaml and has a value, the value of the first key with a value will be returned) + 3. Randomly generated secret password + (A new random secret password with the length specified in the 'length' parameter will be generated and returned) + +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data }} +{{- if $secretData }} + {{- if hasKey $secretData .key }} + {{- $password = index $secretData .key | quote }} + {{- else }} + {{- printf "\nPASSWORDS ERROR: The secret \"%s\" does not contain the key \"%s\"\n" .secret .key | fail -}} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Reuses the value from an existing secret, otherwise sets its value to a default value. + +Usage: +{{ include "common.secrets.lookup" (dict "secret" "secret-name" "key" "keyName" "defaultValue" .Values.myValue "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - defaultValue - String - Required - The default value to use when the secret does not already contain the key; it will be base64-encoded. + - context - Context - Required - Parent context. 
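+ +Example (illustrative; "myapp" and .Values.adminPassword are placeholder names): +{{ include "common.secrets.lookup" (dict "secret" "myapp" "key" "admin-password" "defaultValue" .Values.adminPassword "context" $) }} +This renders the base64-encoded value of the "admin-password" key from the existing "myapp" Secret when it is present, and base64-encodes .Values.adminPassword otherwise.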
+ +*/}} +{{- define "common.secrets.lookup" -}} +{{- $value := "" -}} +{{- $defaultValue := required "\n'common.secrets.lookup': Argument 'defaultValue' missing or empty" .defaultValue -}} +{{- $secretData := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret).data -}} +{{- if and $secretData (hasKey $secretData .key) -}} + {{- $value = index $secretData .key -}} +{{- else -}} + {{- $value = $defaultValue | toString | b64enc -}} +{{- end -}} +{{- printf "%s" $value -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" (include "common.names.namespace" .context) .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..b1ead50cf --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . 
-}} +export {{ $varname }}=$(kubectl get secret --namespace {{ include "common.names.namespace" .context | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 -d) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" (dict "field" "my-password") }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given a key path +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." .key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns the first .Values key with a defined value, or the first key of the list if none are defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..ded1ae3bc --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. 
Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..b6906ff77 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..f820ec107 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB® required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB® values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB® is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl new file mode 100644 index 000000000..74472a061 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_mysql.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MySQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.mysql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MySQL values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mysql.passwords" -}} + {{- $existingSecret := include "common.mysql.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mysql.values.enabled" . -}} + {{- $architecture := include "common.mysql.values.architecture" . -}} + {{- $authPrefix := include "common.mysql.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mysql-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mysql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mysql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mysql.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mysql. + +Usage: +{{ include "common.mysql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mysql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mysql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mysql.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. Default: false +*/}} +{{- define "common.mysql.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mysql.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mysql.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MySQL is used as subchart or not. 
Default: false +*/}} +{{- define "common.mysql.values.key.auth" -}} + {{- if .subchart -}} + mysql.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..164ec0d01 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
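+Note that outside subchart usage, i.e. when this chart itself is being validated, the helper returns the negation of the chart's own 'enabled' value.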
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..dcccfc1ae --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,76 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis® required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $standarizedVersion := include "common.redis.values.standarized.version" . 
}} + + {{- $existingSecret := ternary (printf "%s%s" $valueKeyPrefix "auth.existingSecret") (printf "%s%s" $valueKeyPrefix "existingSecret") (eq $standarizedVersion "true") }} + {{- $existingSecretValue := include "common.utils.getValueFromKey" (dict "key" $existingSecret "context" .context) }} + + {{- $valueKeyRedisPassword := ternary (printf "%s%s" $valueKeyPrefix "auth.password") (printf "%s%s" $valueKeyPrefix "password") (eq $standarizedVersion "true") }} + {{- $valueKeyRedisUseAuth := ternary (printf "%s%s" $valueKeyPrefix "auth.enabled") (printf "%s%s" $valueKeyPrefix "usePassword") (eq $standarizedVersion "true") }} + + {{- if and (or (not $existingSecret) (eq $existingSecret "\"\"")) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $useAuth := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUseAuth "context" .context) -}} + {{- if eq $useAuth "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.keys.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} + +{{/* +Checks whether the redis chart includes the standardizations (version >= 14) + +Usage: +{{ include "common.redis.values.standarized.version" (dict "context" $) }} +*/}} +{{- define "common.redis.values.standarized.version" -}} + + {{- $standarizedAuth := printf "%s%s" (include "common.redis.values.keys.prefix" .) "auth" -}} + {{- $standarizedAuthValues := include "common.utils.getValueFromKey" (dict "key" $standarizedAuth "context" .context) }} + + {{- if $standarizedAuthValues -}} + {{- true -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. 
The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.values.single.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/values.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/values.yaml new file mode 100644 index 000000000..f2df68e5e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/charts/common/values.yaml @@ -0,0 +1,5 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
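+## The 'exampleValue' entry below only gives linters and CI a concrete value to render; none of the library's templates consume it.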
+## @skip exampleValue +## +exampleValue: common-chart diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt old mode 100755 new mode 100644 index 3cc2edbed..c287e1e56 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt @@ -1,5 +1,8 @@ -{{- if contains .Values.service.type "LoadBalancer" }} -{{- if not .Values.auth.clientPassword }} +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +{{- if and (not .Values.auth.client.enabled) (eq .Values.service.type "LoadBalancer") }} ------------------------------------------------------------------------------- WARNING @@ -13,45 +16,61 @@ ------------------------------------------------------------------------------- {{- end }} -{{- end }} ** Please be patient while the chart is being deployed ** -ZooKeeper can be accessed via port 2181 on the following DNS name from within your cluster: +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: - {{ template "zookeeper.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ .Release.Namespace }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/zookeeper/entrypoint.sh /opt/bitnami/scripts/zookeeper/run.sh + +{{- else }} + +ZooKeeper can be accessed via port {{ .Values.service.ports.client }} on the following DNS name from within your cluster: + + {{ template "common.names.fullname" . }}.{{ template "zookeeper.namespace" . }}.svc.{{ .Values.clusterDomain }} To connect to your ZooKeeper server run the following commands: - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "zookeeper.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") + export POD_NAME=$(kubectl get pods --namespace {{ template "zookeeper.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") kubectl exec -it $POD_NAME -- zkCli.sh To connect to your ZooKeeper server from outside the cluster execute the following commands: -{{- if contains "NodePort" .Values.service.type }} +{{- if eq .Values.service.type "NodePort" }} - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "zookeeper.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ template "zookeeper.namespace" . 
}} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ template "zookeeper.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) zkCli.sh $NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.service.type }} +{{- else if eq .Values.service.type "LoadBalancer" }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. - Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "zookeeper.fullname" . }}' + Watch the status with: 'kubectl get svc --namespace {{ template "zookeeper.namespace" . }} -w {{ template "common.names.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") - zkCli.sh $SERVICE_IP:2181 + export SERVICE_IP=$(kubectl get svc --namespace {{ template "zookeeper.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + zkCli.sh $SERVICE_IP:{{ .Values.service.ports.client }} -{{- else if contains "ClusterIP" .Values.service.type }} +{{- else if eq .Values.service.type "ClusterIP" }} - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "zookeeper.fullname" . }} 2181:2181 & - zkCli.sh 127.0.0.1:2181 + kubectl port-forward --namespace {{ template "zookeeper.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.client }}:{{ .Values.containerPorts.client }} & + zkCli.sh 127.0.0.1:{{ .Values.service.ports.client }} {{- end }} - -{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} - -WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. -+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ - {{- end }} + +{{- include "zookeeper.validateValues" . }} +{{- include "zookeeper.checkRollingTags" . }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl old mode 100755 new mode 100644 index f82502d69..d855bada0 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -1,34 +1,43 @@ {{/* vim: set filetype=mustache: */}} + {{/* -Expand the name of the chart. +Return the proper ZooKeeper image name */}} -{{- define "zookeeper.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- define "zookeeper.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} {{- end -}} {{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
+Return the proper image name (for the init container volume-permissions image) */}} -{{- define "zookeeper.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} +{{- define "zookeeper.volumePermissions.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} {{- end -}} {{/* -Create chart name and version as used by the chart label. +Return the proper Docker Image Registry Secret Names */}} -{{- define "zookeeper.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- define "zookeeper.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Check if there are rolling tags in the images +*/}} +{{- define "zookeeper.checkRollingTags" -}} +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} +{{- end -}} + +{{/* +Return ZooKeeper Namespace to use +*/}} +{{- define "zookeeper.namespace" -}} +{{- if .Values.namespaceOverride -}} + {{- .Values.namespaceOverride -}} +{{- else -}} + {{- .Release.Namespace -}} +{{- end -}} {{- end -}} {{/* @@ -36,177 +45,317 @@ Create chart name and version as used by the chart label. */}} {{- define "zookeeper.serviceAccountName" -}} {{- if .Values.serviceAccount.create -}} - {{ default (include "zookeeper.fullname" .) .Values.serviceAccount.name }} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} {{- end -}} {{- end -}} {{/* -Return the proper Zookeeper image name +Return the ZooKeeper client-server authentication credentials secret */}} -{{- define "zookeeper.image" -}} -{{- $registryName := .Values.image.registry -}} -{{- $repositoryName := .Values.image.repository -}} -{{- $tag := .Values.image.tag | toString -}} +{{- define "zookeeper.client.secretName" -}} +{{- if .Values.auth.client.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.client.existingSecret $) -}} +{{- else -}} + {{- printf "%s-client-auth" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + {{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option +Return the ZooKeeper server-server authentication credentials secret */}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} +{{- define "zookeeper.quorum.secretName" -}} +{{- if .Values.auth.quorum.existingSecret -}} + {{- printf "%s" (tpl .Values.auth.quorum.existingSecret $) -}} +{{- else -}} + {{- printf "%s-quorum-auth" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper client-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.client.createSecret" -}} +{{- if and .Values.auth.client.enabled (empty .Values.auth.client.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a ZooKeeper server-server authentication credentials secret object should be created +*/}} +{{- define "zookeeper.quorum.createSecret" -}} +{{- if and .Values.auth.quorum.enabled (empty .Values.auth.quorum.existingSecret) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Returns the available value for certain key in an existing secret (if it exists), +otherwise it generates a random value. +*/}} +{{- define "getValueFromSecret" }} + {{- $len := (default 16 .Length) | int -}} + {{- $obj := (lookup "v1" "Secret" .Namespace .Name).data -}} + {{- if $obj }} + {{- index $obj .Key | b64dec -}} {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- randAlphaNum $len -}} {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} -{{- end -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "zookeeper.imagePullSecrets" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -Also, we can not use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} -{{- if .Values.global.imagePullSecrets }} -imagePullSecrets: -{{- range .Values.global.imagePullSecrets }} - - name: {{ . }} {{- end }} -{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} -imagePullSecrets: -{{- range .Values.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- range .Values.volumePermissions.image.pullSecrets }} - - name: {{ . }} -{{- end }} -{{- end -}} -{{- end -}} {{/* -Common labels +Return the ZooKeeper configuration ConfigMap name */}} -{{- define "zookeeper.labels" -}} -app.kubernetes.io/name: {{ include "zookeeper.name" . }} -helm.sh/chart: {{ include "zookeeper.chart" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end -}} - -{{/* -Renders a value that contains template. -Usage: -{{ include "zookeeper.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} -*/}} -{{- define "zookeeper.tplValue" -}} - {{- if typeIs "string" .value }} - {{- tpl .value .context }} - {{- else }} - {{- tpl (.value | toYaml) .context }} - {{- end }} -{{- end -}} - -{{/* -Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector -*/}} -{{- define "zookeeper.matchLabels" -}} -app.kubernetes.io/name: {{ include "zookeeper.name" . 
}} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Return ZooKeeper Client Password -*/}} -{{- define "zookeeper.clientPassword" -}} -{{- if .Values.auth.clientPassword -}} - {{- .Values.auth.clientPassword -}} +{{- define "zookeeper.configmapName" -}} +{{- if .Values.existingConfigmap -}} + {{- printf "%s" (tpl .Values.existingConfigmap $) -}} {{- else -}} - {{- randAlphaNum 10 -}} + {{- printf "%s" (include "common.names.fullname" .) -}} {{- end -}} {{- end -}} {{/* -Return ZooKeeper Servers Passwords +Return true if a ConfigMap object should be created for ZooKeeper configuration */}} -{{- define "zookeeper.serverPasswords" -}} -{{- if .Values.auth.serverPasswords -}} - {{- .Values.auth.serverPasswords -}} -{{- else -}} - {{- randAlphaNum 10 -}} +{{- define "zookeeper.createConfigmap" -}} +{{- if and .Values.configuration (not .Values.existingConfigmap) }} + {{- true -}} {{- end -}} {{- end -}} {{/* -Return the proper image name (for the init container volume-permissions image) +Return true if a TLS secret should be created for ZooKeeper quorum */}} -{{- define "zookeeper.volumePermissions.image" -}} -{{- $registryName := .Values.volumePermissions.image.registry -}} -{{- $repositoryName := .Values.volumePermissions.image.repository -}} -{{- $tag := .Values.volumePermissions.image.tag | toString -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. -Also, we can't use a single if because lazy evaluation is not an option -*/}} -{{- if .Values.global }} - {{- if .Values.global.imageRegistry }} - {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} - {{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} - {{- end -}} -{{- else -}} - {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- define "zookeeper.quorum.createTlsSecret" -}} +{{- if and .Values.tls.quorum.enabled .Values.tls.quorum.autoGenerated (not .Values.tls.quorum.existingSecret) }} + {{- true -}} {{- end -}} {{- end -}} {{/* -Return the proper Storage Class +Return the secret containing ZooKeeper quorum TLS certificates */}} -{{- define "zookeeper.storageClass" -}} -{{/* -Helm 2.11 supports the assignment of a value to a variable defined in a different scope, -but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. -*/}} -{{- if .Values.global -}} - {{- if .Values.global.storageClass -}} - {{- if (eq "-" .Values.global.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.global.storageClass -}} - {{- end -}} - {{- else -}} - {{- if .Values.persistence.storageClass -}} - {{- if (eq "-" .Values.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} - {{- end -}} - {{- end -}} - {{- end -}} +{{- define "zookeeper.quorum.tlsSecretName" -}} +{{- $secretName := .Values.tls.quorum.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} {{- else -}} - {{- if .Values.persistence.storageClass -}} - {{- if (eq "-" .Values.persistence.storageClass) -}} - {{- printf "storageClassName: \"\"" -}} - {{- else }} - {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} - {{- end -}} - {{- end -}} + {{- printf "%s-quorum-crt" (include "common.names.fullname" .) 
-}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper quorum +*/}} +{{- define "zookeeper.quorum.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.quorum.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.quorum.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-quorum-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a TLS secret should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsSecret" -}} +{{- if and .Values.tls.client.enabled .Values.tls.client.autoGenerated (not .Values.tls.client.existingSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the secret containing ZooKeeper client TLS certificates +*/}} +{{- define "zookeeper.client.tlsSecretName" -}} +{{- $secretName := .Values.tls.client.existingSecret -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-crt" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum keystore key to be retrieved from tls.quorum.existingSecret. +*/}} +{{- define "zookeeper.quorum.tlsKeystoreKey" -}} +{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.quorum.existingSecretKeystoreKey -}} +{{- else -}} + {{- printf "zookeeper.keystore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum truststore key to be retrieved from tls.quorum.existingSecret. +*/}} +{{- define "zookeeper.quorum.tlsTruststoreKey" -}} +{{- if and .Values.tls.quorum.existingSecret .Values.tls.quorum.existingSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.quorum.existingSecretTruststoreKey -}} +{{- else -}} + {{- printf "zookeeper.truststore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client keystore key to be retrieved from tls.client.existingSecret. +*/}} +{{- define "zookeeper.client.tlsKeystoreKey" -}} +{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.client.existingSecretKeystoreKey -}} +{{- else -}} + {{- printf "zookeeper.keystore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client truststore key to be retrieved from tls.client.existingSecret. 
+*/}} +{{- define "zookeeper.client.tlsTruststoreKey" -}} +{{- if and .Values.tls.client.existingSecret .Values.tls.client.existingSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.client.existingSecretTruststoreKey -}} +{{- else -}} + {{- printf "zookeeper.truststore.jks" -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret containing the Keystore and Truststore password should be created for ZooKeeper client +*/}} +{{- define "zookeeper.client.createTlsPasswordsSecret" -}} +{{- if and .Values.tls.client.enabled (not .Values.tls.client.passwordsSecretName) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the name of the secret containing the Keystore and Truststore password +*/}} +{{- define "zookeeper.client.tlsPasswordsSecret" -}} +{{- $secretName := .Values.tls.client.passwordsSecretName -}} +{{- if $secretName -}} + {{- printf "%s" (tpl $secretName $) -}} +{{- else -}} + {{- printf "%s-client-tls-pass" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum keystore password key to be retrieved from tls.quorum.passwordsSecretName. +*/}} +{{- define "zookeeper.quorum.tlsPasswordKeystoreKey" -}} +{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.quorum.passwordsSecretKeystoreKey -}} +{{- else -}} + {{- printf "keystore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the quorum truststore password key to be retrieved from tls.quorum.passwordsSecretName. +*/}} +{{- define "zookeeper.quorum.tlsPasswordTruststoreKey" -}} +{{- if and .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.passwordsSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.quorum.passwordsSecretTruststoreKey -}} +{{- else -}} + {{- printf "truststore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client keystore password key to be retrieved from tls.client.passwordsSecretName. +*/}} +{{- define "zookeeper.client.tlsPasswordKeystoreKey" -}} +{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretKeystoreKey -}} + {{- printf "%s" .Values.tls.client.passwordsSecretKeystoreKey -}} +{{- else -}} + {{- printf "keystore-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Get the client truststore password key to be retrieved from tls.client.passwordsSecretName. +*/}} +{{- define "zookeeper.client.tlsPasswordTruststoreKey" -}} +{{- if and .Values.tls.client.passwordsSecretName .Values.tls.client.passwordsSecretTruststoreKey -}} + {{- printf "%s" .Values.tls.client.passwordsSecretTruststoreKey -}} +{{- else -}} + {{- printf "truststore-password" -}} +{{- end -}} +{{- end -}}
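
For reference (not part of the patch): the four password-key helpers above only honor the custom key names when the matching passwordsSecretName is also set; otherwise they fall back to the literal keys "keystore-password" and "truststore-password". A minimal values sketch, all names illustrative, that exercises the overrides:

    tls:
      client:
        enabled: true
        existingSecret: my-zk-client-certs        # hypothetical secret holding the JKS stores
        passwordsSecretName: my-zk-client-pass    # hypothetical secret holding the store passwords
        passwordsSecretKeystoreKey: ks-pass       # zookeeper.client.tlsPasswordKeystoreKey renders "ks-pass"
        passwordsSecretTruststoreKey: ts-pass     # zookeeper.client.tlsPasswordTruststoreKey renders "ts-pass"
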
+ +{{/* +Compile all warnings into a single message. +*/}} +{{- define "zookeeper.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "zookeeper.validateValues.client.auth" .) -}} +{{- $messages := append $messages (include "zookeeper.validateValues.quorum.auth" .) -}} +{{- $messages := append $messages (include "zookeeper.validateValues.client.tls" .) -}} +{{- $messages := append $messages (include "zookeeper.validateValues.quorum.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - client-server authentication enabled +*/}} +{{- define "zookeeper.validateValues.client.auth" -}} +{{- if and .Values.auth.client.enabled (not .Values.auth.client.existingSecret) (or (not .Values.auth.client.clientUser) (not .Values.auth.client.serverUsers)) }} +zookeeper: auth.client.enabled + In order to enable client-server authentication, you need to provide the list + of users to be created and the user to use for client authentication. +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - server-server authentication enabled +*/}} +{{- define "zookeeper.validateValues.quorum.auth" -}} +{{- if and .Values.auth.quorum.enabled (not .Values.auth.quorum.existingSecret) (or (not .Values.auth.quorum.learnerUser) (not .Values.auth.quorum.serverUsers)) }} +zookeeper: auth.quorum.enabled + In order to enable server-server authentication, you need to provide the list + of users to be created and the user to use for quorum authentication. +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Client TLS enabled +*/}} +{{- define "zookeeper.validateValues.client.tls" -}} +{{- if and .Values.tls.client.enabled (not .Values.tls.client.autoGenerated) (not .Values.tls.client.existingSecret) }} +zookeeper: tls.client.enabled + In order to enable Client TLS encryption, you also need to provide + an existing secret containing the Keystore and Truststore or + enable auto-generated certificates. +{{- end -}} +{{- end -}} + +{{/* +Validate values of ZooKeeper - Quorum TLS enabled +*/}} +{{- define "zookeeper.validateValues.quorum.tls" -}} +{{- if and .Values.tls.quorum.enabled (not .Values.tls.quorum.autoGenerated) (not .Values.tls.quorum.existingSecret) }} +zookeeper: tls.quorum.enabled + In order to enable Quorum TLS, you also need to provide + an existing secret containing the Keystore and Truststore or + enable auto-generated certificates. +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml old mode 100755 new mode 100644 index 1a4061565..12b4f489f --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -1,17 +1,17 @@ -{{- if .Values.config }} +{{- if (include "zookeeper.createConfigmap" .) }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ template "zookeeper.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} data: zoo.cfg: |- -{{ .Values.config | indent 4 }} -{{- end -}} + {{- include "common.tplvalues.render" ( dict "value" .Values.configuration "context" $ ) | nindent 4 }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/extra-list.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/extra-list.yaml new file mode 100644 index 000000000..9ac65f9e1 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/extra-list.yaml @@ -0,0 +1,4 @@ +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml old mode 100755 new mode 100644 index 3e26ed6c8..5afc4b3e5 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -2,20 +2,20 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "zookeeper.fullname" . }}-metrics - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} - app.kubernetes.io/component: zookeeper + name: {{ template "common.names.fullname" . }}-metrics + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} annotations: {{- if .Values.metrics.service.annotations }} - {{ include "zookeeper.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} {{- end }} spec: @@ -24,6 +24,6 @@ spec: - name: tcp-metrics port: {{ .Values.metrics.service.port }} targetPort: metrics - selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} app.kubernetes.io/component: zookeeper {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml old mode 100755 new mode 100644 index f7e30b4bc..63532832c --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -1,43 +1,41 @@ {{- if .Values.networkPolicy.enabled }} kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} metadata: - name: {{ include "zookeeper.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} + name: {{ include "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: podSelector: - matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress ingress: - # Allow inbound connections to zookeeper + # Allow inbound connections to ZooKeeper - ports: - - port: {{ .Values.service.port }} - from: + - port: {{ .Values.containerPorts.client }} + {{- if .Values.metrics.enabled }} + - port: {{ .Values.metrics.containerPort }} + {{- end }} {{- if not .Values.networkPolicy.allowExternal }} + from: - podSelector: matchLabels: - {{ include "zookeeper.fullname" . }}-client: "true" + {{ include "common.names.fullname" . }}-client: "true" - podSelector: - matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }} - {{- else }} - - podSelector: - matchLabels: {} + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} {{- end }} - # Internal ports - - ports: &intranodes_ports - - port: {{ .Values.service.followerPort }} - - port: {{ .Values.service.electionPort }} + # Allow internal communications between nodes + - ports: + - port: {{ .Values.containerPorts.follower }} + - port: {{ .Values.containerPorts.election }} from: - podSelector: - matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }} - egress: - - ports: *intranodes_ports - # Allow outbound connections from zookeeper nodes - + matchLabels: {{- include "common.labels.matchLabels" . | nindent 14 }} {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/pdb.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/pdb.yaml new file mode 100644 index 000000000..f7faf65f9 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/pdb.yaml @@ -0,0 +1,26 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . 
}} +kind: PodDisruptionBudget +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml deleted file mode 100755 index 818950c66..000000000 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- $replicaCount := int .Values.replicaCount }} -{{- if gt $replicaCount 1 }} -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "zookeeper.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} - app.kubernetes.io/component: zookeeper - {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - selector: - matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: zookeeper - {{- toYaml .Values.podDisruptionBudget | nindent 2 }} -{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml new file mode 100644 index 000000000..87dcd3565 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrule.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} +{{- end }} + diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml deleted file mode 100755 index 9cda3985c..000000000 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ include "zookeeper.fullname" . }} - {{- if .Values.metrics.prometheusRule.namespace }} - namespace: {{ .Values.metrics.prometheusRule.namespace }} - {{- else }} - namespace: {{ .Release.Namespace }} - {{- end }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} - app.kubernetes.io/component: zookeeper - {{- range $key, $value := .Values.metrics.prometheusRule.selector }} - {{ $key }}: {{ $value | quote }} - {{- end }} - {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - groups: - - name: {{ include "zookeeper.fullname" . }} - rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 6 }} -{{- end }} - diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml new file mode 100644 index 000000000..d0a7ddb49 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/scripts-configmap.yaml @@ -0,0 +1,102 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + init-certs.sh: |- + #!/bin/bash + + {{- if .Values.tls.client.enabled }} + if [[ -f "/certs/client/tls.key" ]] && [[ -f "/certs/client/tls.crt" ]] && [[ -f "/certs/client/ca.crt" ]]; then + if [[ -f "/opt/bitnami/zookeeper/config/certs/client/.initialized" ]]; then + exit 0 + fi + openssl pkcs12 -export -in "/certs/client/tls.crt" \ + -passout pass:"${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -inkey "/certs/client/tls.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -deststorepass "${ZOO_TLS_CLIENT_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/client/ca.crt" \ + -keystore "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" \ + -storepass "${ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD}" \ + -noprompt + touch /opt/bitnami/zookeeper/config/certs/client/.initialized + {{- if .Values.tls.client.autoGenerated }} + else + echo "Couldn't find the expected PEM certificates! They are mandatory when Client encryption via TLS is enabled." + exit 1 + fi + {{- else }} + elif [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} ]]; then + cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks" + cp {{ printf "/certs/client/%s" (include "zookeeper.client.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Client encryption via TLS is enabled." + exit 1 + fi + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} + if [[ -f "/certs/quorum/tls.key" ]] && [[ -f "/certs/quorum/tls.crt" ]] && [[ -f "/certs/quorum/ca.crt" ]]; then + openssl pkcs12 -export -in "/certs/quorum/tls.crt" \ + -passout pass:"${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -inkey "/certs/quorum/tls.key" \ + -out "/tmp/keystore.p12" + keytool -importkeystore -srckeystore "/tmp/keystore.p12" \ + -srcstoretype PKCS12 \ + -srcstorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -deststorepass "${ZOO_TLS_QUORUM_KEYSTORE_PASSWORD}" \ + -destkeystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks" + rm "/tmp/keystore.p12" + keytool -import -file "/certs/quorum/ca.crt" \ + -keystore "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" \ + -storepass "${ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD}" \ + -noprompt + {{- if .Values.tls.quorum.autoGenerated }} + else + echo "Couldn't find the expected PEM certificates! They are mandatory when Quorum encryption via TLS is enabled." + exit 1 + fi + {{- else }} + elif [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} ]] && [[ -f {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} ]]; then + cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsTruststoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks" + cp {{ printf "/certs/quorum/%s" (include "zookeeper.quorum.tlsKeystoreKey" .) | quote }} "/opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when Quorum encryption via TLS is enabled." + exit 1 + fi + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID + # Check ZOO_SERVER_ID in the persistent volume via myid; + # if not present, set it based on the pod hostname + if [[ -f "/bitnami/zookeeper/data/myid" ]]; then + export ZOO_SERVER_ID="$(cat /bitnami/zookeeper/data/myid)" + else + HOSTNAME="$(hostname -s)" + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID="$((ORD + {{ .Values.minServerId }} ))" + else + echo "Failed to get index from hostname $HOSTNAME" + exit 1 + fi + fi + exec /entrypoint.sh /run.sh
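
For reference (not part of the patch): setup.sh derives the server id from the StatefulSet ordinal only on first boot; on later restarts the id persisted in /bitnami/zookeeper/data/myid wins. A quick Bash sketch of the arithmetic, assuming the chart default of minServerId: 1:

    HOSTNAME="zookeeper-2"                # hypothetical pod name, ordinal suffix 2
    [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]     # BASH_REMATCH[2] captures the ordinal "2"
    ORD=${BASH_REMATCH[2]}
    export ZOO_SERVER_ID="$((ORD + 1))"   # minServerId 1 -> ZOO_SERVER_ID=3

The same offset appears as the ::{{ add $e $minServerId }} suffix added to ZOO_SERVERS later in this patch, so each myid stays aligned with its entry in the server list.
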
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml old mode 100755 new mode 100644 index b3d727fec..82ebc2eed --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml @@ -1,18 +1,77 @@ -{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +{{- if (include "zookeeper.client.createSecret" .) }} apiVersion: v1 kind: Secret metadata: - name: {{ template "zookeeper.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} + name: {{ printf "%s-client-auth" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} type: Opaque data: - client-password: {{ include "zookeeper.clientPassword" . | b64enc | quote }} - server-password: {{ include "zookeeper.serverPasswords" . | b64enc | quote }} + client-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "client-password" "providedValues" (list "auth.client.clientPassword") "context" $) }} + server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-client-auth" (include "common.names.fullname" .)) "key" "server-password" "providedValues" (list "auth.client.serverPasswords") "context" $) }} +{{- end }} +{{- if (include "zookeeper.quorum.createSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-quorum-auth" (include "common.names.fullname" .) }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + quorum-learner-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-learner-password" "providedValues" (list "auth.quorum.learnerPassword") "context" $) }} + quorum-server-password: {{ include "common.secrets.passwords.manage" (dict "secret" (printf "%s-quorum-auth" (include "common.names.fullname" .)) "key" "quorum-server-password" "providedValues" (list "auth.quorum.serverPasswords") "context" $) }} +{{- end }} +{{- if (include "zookeeper.client.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-client-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.client.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.client.truststorePassword | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "common.names.fullname" . }}-quorum-tls-pass + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + keystore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.keystorePassword | b64enc | quote }} + truststore-password: {{ default (randAlphaNum 10) .Values.tls.quorum.truststorePassword | b64enc | quote }} {{- end }}
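
For reference (not part of the patch): common.secrets.passwords.manage first looks up the target secret in the cluster and re-uses an existing value for the key, so randomly generated passwords survive helm upgrade; a fixed value can still be pinned through the listed providedValues path. A values sketch, names illustrative:

    auth:
      client:
        enabled: true
        clientUser: zkclient          # hypothetical client user
        clientPassword: changeit      # consumed via providedValues "auth.client.clientPassword"
        serverUsers: zkclient
        serverPasswords: changeit     # consumed via providedValues "auth.client.serverPasswords"
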
diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml old mode 100755 new mode 100644 index 3f7ef39fd..958a57ac2 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -3,13 +3,19 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "zookeeper.serviceAccountName" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper role: zookeeper {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml old mode 100755 new mode 100644 index 5782dad59..2c8af3350 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -2,27 +2,33 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "zookeeper.fullname" . }} + name: {{ template "common.names.fullname" . }} {{- if .Values.metrics.serviceMonitor.namespace }} namespace: {{ .Values.metrics.serviceMonitor.namespace }} {{- else }} namespace: {{ .Release.Namespace }} {{- end }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} - app.kubernetes.io/component: zookeeper - {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} - {{ $key }}: {{ $value | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.additionalLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} selector: - matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: zookeeper + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics endpoints: - port: tcp-metrics path: "/metrics" @@ -32,7 +38,16 @@ spec: {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} namespaceSelector: matchNames: - - {{ .Release.Namespace }} + - {{ template "zookeeper.namespace" . }} {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml old mode 100755 new mode 100644 index fa1e5231f..aa4f1a971 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -1,81 +1,100 @@ -apiVersion: apps/v1 +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} kind: StatefulSet metadata: - name: {{ template "zookeeper.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: zookeeper role: zookeeper {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: - serviceName: {{ template "zookeeper.fullname" . }}-headless replicas: {{ .Values.replicaCount }} podManagementPolicy: {{ .Values.podManagementPolicy }} - updateStrategy: - type: {{ .Values.updateStrategy }} - {{- if (eq "Recreate" .Values.updateStrategy) }} - rollingUpdate: null - {{- else if .Values.rollingUpdatePartition }} - rollingUpdate: - partition: {{ .Values.rollingUpdatePartition }} - {{- end }} selector: - matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} app.kubernetes.io/component: zookeeper + serviceName: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }} + {{- if .Values.updateStrategy }} + updateStrategy: {{- toYaml .Values.updateStrategy | nindent 4 }} + {{- end }} template: metadata: - name: {{ template "zookeeper.fullname" . 
}} - labels: {{- include "zookeeper.labels" . | nindent 8 }} + annotations: + {{- if .Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if (include "zookeeper.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.quorum.createSecret" .) (include "zookeeper.client.createSecret" .) (include "zookeeper.client.createTlsPasswordsSecret" .) (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if or (include "zookeeper.client.createTlsSecret" .) (include "zookeeper.quorum.createTlsSecret" .) }} + checksum/tls-secrets: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} app.kubernetes.io/component: zookeeper {{- if .Values.podLabels }} - {{- include "zookeeper.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} {{- end }} - {{- if .Values.podAnnotations }} - annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} - {{- end }} spec: - {{- if .Values.schedulerName }} - schedulerName: {{ .Values.schedulerName }} - {{- end }} - {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} - {{- if .Values.securityContext.enabled }} - securityContext: - fsGroup: {{ .Values.securityContext.fsGroup }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} {{- end }} {{- if .Values.affinity }} - affinity: {{- include "zookeeper.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "zookeeper" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} {{- end }} {{- if .Values.nodeSelector }} - nodeSelector: {{- include "zookeeper.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} {{- end }} {{- if .Values.tolerations }} - tolerations: {{- include "zookeeper.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" .) 
| nindent 8 }} {{- end }} {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} - {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} - name: volume-permissions image: {{ template "zookeeper.volumePermissions.image" . }} imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} command: - - chown + - /bin/bash args: - - -R - - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} - - /bitnami/zookeeper - {{- if .Values.dataLogDir }} - - {{ .Values.dataLogDir }} - {{- end }} - securityContext: - runAsUser: 0 + - -ec + - | + mkdir -p /bitnami/zookeeper + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} /bitnami/zookeeper + find /bitnami/zookeeper -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if .Values.dataLogDir }} + mkdir -p {{ .Values.dataLogDir }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} {{ .Values.dataLogDir }} + find {{ .Values.dataLogDir }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- end }} + {{- if .Values.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} {{- if .Values.volumePermissions.resources }} resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} {{- end }} @@ -86,43 +105,106 @@ spec: - name: data-log mountPath: {{ .Values.dataLogDir }} {{- end }} - {{- end }} + {{- end }} + {{- if or .Values.tls.client.enabled .Values.tls.quorum.enabled }} + - name: init-certs + image: {{ include "zookeeper.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /scripts/init-certs.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if or .Values.tls.client.passwordsSecretName (include "zookeeper.client.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.passwordsSecretName (include "zookeeper.quorum.createTlsPasswordsSecret" .) }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . 
}} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- if .Values.tls.resources }} + resources: {{- toYaml .Values.tls.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/init-certs.sh + subPath: init-certs.sh + {{- if or .Values.tls.client.enabled }} + - name: client-certificates + mountPath: /certs/client + - name: client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + {{- end }} + {{- if or .Values.tls.quorum.enabled }} + - name: quorum-certificates + mountPath: /certs/quorum + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + {{- end }} + {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.initContainers "context" $) | trim | nindent 8 }} + {{- end }} containers: - name: zookeeper image: {{ template "zookeeper.image" . }} imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - {{- if .Values.securityContext.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} {{- end }} - command: - - bash - - -ec - - | - # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname - HOSTNAME=`hostname -s` - if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then - ORD=${BASH_REMATCH[2]} - export ZOO_SERVER_ID=$((ORD+1)) - else - echo "Failed to get index from hostname $HOST" - exit 1 - fi - exec /entrypoint.sh /run.sh {{- if .Values.resources }} resources: {{- toYaml .Values.resources | nindent 12 }} {{- end }} env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} - name: ZOO_DATA_LOG_DIR value: {{ .Values.dataLogDir | quote }} - name: ZOO_PORT_NUMBER - value: {{ .Values.service.port | quote }} + value: {{ .Values.containerPorts.client | quote }} - name: ZOO_TICK_TIME value: {{ .Values.tickTime | quote }} - name: ZOO_INIT_LIMIT value: {{ .Values.initLimit | quote }} - name: ZOO_SYNC_LIMIT value: {{ .Values.syncLimit | quote }} + - name: ZOO_PRE_ALLOC_SIZE + value: {{ .Values.preAllocSize | quote }} + - name: ZOO_SNAPCOUNT + value: {{ .Values.snapCount | quote }} - name: ZOO_MAX_CLIENT_CNXNS value: {{ .Values.maxClientCnxns | quote }} - name: ZOO_4LW_COMMANDS_WHITELIST @@ -137,37 +219,56 @@ spec: value: {{ .Values.maxSessionTimeout | quote }} - name: ZOO_SERVERS {{- $replicaCount := int .Values.replicaCount }} - {{- $followerPort := int .Values.service.followerPort }} - {{- $electionPort := int .Values.service.electionPort }} - {{- $releaseNamespace := .Release.Namespace }} - {{- $zookeeperFullname := 
include "zookeeper.fullname" . }} + {{- $minServerId := int .Values.minServerId }} + {{- $followerPort := int .Values.containerPorts.follower }} + {{- $electionPort := int .Values.containerPorts.election }} + {{- $releaseNamespace := include "zookeeper.namespace" . }} + {{- $zookeeperFullname := include "common.names.fullname" . }} {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} {{- $clusterDomain := .Values.clusterDomain }} - value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }} {{ end }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }}::{{ add $e $minServerId }} {{ end }} - name: ZOO_ENABLE_AUTH - value: {{ ternary "yes" "no" .Values.auth.enabled | quote }} - {{- if .Values.auth.enabled }} + value: {{ ternary "yes" "no" .Values.auth.client.enabled | quote }} + {{- if .Values.auth.client.enabled }} - name: ZOO_CLIENT_USER - value: {{ .Values.auth.clientUser | quote }} + value: {{ .Values.auth.client.clientUser | quote }} - name: ZOO_CLIENT_PASSWORD valueFrom: secretKeyRef: - name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . }}{{ end }} + name: {{ include "zookeeper.client.secretName" . }} key: client-password - name: ZOO_SERVER_USERS - value: {{ .Values.auth.serverUsers | quote }} + value: {{ .Values.auth.client.serverUsers | quote }} - name: ZOO_SERVER_PASSWORDS valueFrom: secretKeyRef: - name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . }}{{ end }} + name: {{ include "zookeeper.client.secretName" . }} key: server-password {{- end }} + - name: ZOO_ENABLE_QUORUM_AUTH + value: {{ ternary "yes" "no" .Values.auth.quorum.enabled | quote }} + {{- if .Values.auth.quorum.enabled }} + - name: ZOO_QUORUM_LEARNER_USER + value: {{ .Values.auth.quorum.learnerUser | quote }} + - name: ZOO_QUORUM_LEARNER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . }} + key: quorum-learner-password + - name: ZOO_QUORUM_SERVER_USERS + value: {{ .Values.auth.quorum.serverUsers | quote }} + - name: ZOO_QUORUM_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.secretName" . 
}} + key: quorum-server-password + {{- end }} - name: ZOO_HEAP_SIZE value: {{ .Values.heapSize | quote }} - name: ZOO_LOG_LEVEL value: {{ .Values.logLevel | quote }} - name: ALLOW_ANONYMOUS_LOGIN - value: {{ ternary "yes" "no" .Values.allowAnonymousLogin | quote }} + value: {{ ternary "no" "yes" .Values.auth.client.enabled | quote }} {{- if .Values.jvmFlags }} - name: JVMFLAGS value: {{ .Values.jvmFlags | quote }} @@ -178,103 +279,174 @@ spec: - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER value: {{ .Values.metrics.containerPort | quote }} {{- end }} - {{- if .Values.service.tls.client_enable }} + {{- if .Values.tls.client.enabled }} + - name: ZOO_TLS_PORT_NUMBER + value: {{ .Values.containerPorts.tls | quote }} - name: ZOO_TLS_CLIENT_ENABLE - value: {{ .Values.service.tls.client_enable | quote }} + value: {{ .Values.tls.client.enabled | quote }} + - name: ZOO_TLS_CLIENT_AUTH + value: {{ .Values.tls.client.auth | quote }} - name: ZOO_TLS_CLIENT_KEYSTORE_FILE - value: {{ .Values.service.tls.client_keystore_path | quote }} - - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD - value: {{ .Values.service.tls.client_keystore_password | quote }} + value: {{ .Values.tls.client.keystorePath | quote }} - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE - value: {{ .Values.service.tls.client_truststore_path | quote }} + value: {{ .Values.tls.client.truststorePath | quote }} + {{- if or .Values.tls.client.keystorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.client.truststorePassword .Values.tls.client.passwordsSecretName .Values.tls.client.autoGenerated }} - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD - value: {{ .Values.service.tls.client_truststore_password | quote }} - {{ end }} - {{- if .Values.service.tls.quorum_enable }} + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.client.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.client.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.quorum.enabled }} - name: ZOO_TLS_QUORUM_ENABLE - value: {{ .Values.service.tls.quorum_enable | quote }} + value: {{ .Values.tls.quorum.enabled | quote }} + - name: ZOO_TLS_QUORUM_CLIENT_AUTH + value: {{ .Values.tls.quorum.auth | quote }} - name: ZOO_TLS_QUORUM_KEYSTORE_FILE - value: {{ .Values.service.tls.quorum_keystore_path | quote }} - - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD - value: {{ .Values.service.tls.quorum_keystore_password | quote }} + value: {{ .Values.tls.quorum.keystorePath | quote }} - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE - value: {{ .Values.service.tls.quorum_truststore_path | quote }} + value: {{ .Values.tls.quorum.truststorePath | quote }} + {{- if or .Values.tls.quorum.keystorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . }} + key: {{ include "zookeeper.quorum.tlsPasswordKeystoreKey" . }} + {{- end }} + {{- if or .Values.tls.quorum.truststorePassword .Values.tls.quorum.passwordsSecretName .Values.tls.quorum.autoGenerated }} - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD - value: {{ .Values.service.tls.quorum_truststore_password | quote }} - {{ end }} + valueFrom: + secretKeyRef: + name: {{ include "zookeeper.quorum.tlsPasswordsSecret" . 
}} + key: {{ include "zookeeper.quorum.tlsPasswordTruststoreKey" . }} + {{- end }} + {{- end }} - name: POD_NAME valueFrom: fieldRef: apiVersion: v1 fieldPath: metadata.name {{- if .Values.extraEnvVars }} - {{- toYaml .Values.extraEnvVars | nindent 12 }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} ports: - {{ if not .Values.service.tls.disable_base_client_port }} + {{- if not .Values.service.disableBaseClientPort }} - name: client - containerPort: {{ .Values.service.port }} - {{ end }} - {{ if .Values.service.tls.client_enable }} + containerPort: {{ .Values.containerPorts.client }} + {{- end }} + {{- if .Values.tls.client.enabled }} - name: client-tls - containerPort: {{ .Values.service.tls.client_port }} - {{ end }} + containerPort: {{ .Values.containerPorts.tls }} + {{- end }} - name: follower - containerPort: {{ .Values.service.followerPort }} + containerPort: {{ .Values.containerPorts.follower }} - name: election - containerPort: {{ .Values.service.electionPort }} + containerPort: {{ .Values.containerPorts.election }} {{- if .Values.metrics.enabled }} - name: metrics containerPort: {{ .Values.metrics.containerPort }} {{- end }} - {{- if .Values.livenessProbe.enabled }} - livenessProbe: + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} exec: - {{- if not .Values.service.tls.disable_base_client_port }} - command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] {{- else }} - command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] {{- end }} - initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ 
.Values.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} - successThreshold: {{ .Values.livenessProbe.successThreshold }} - failureThreshold: {{ .Values.livenessProbe.failureThreshold }} {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled" "probeCommandTimeout") "context" $) | nindent 12 }} exec: - {{- if not .Values.service.tls.disable_base_client_port }} - command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- if not .Values.service.disableBaseClientPort }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.containerPorts.client }} | grep imok'] + {{- else if not .Values.tls.client.enabled }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} | grep imok'] {{- else }} - command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.containerPorts.tls }} -cert {{ .Values.service.tls.client_cert_pem_path }} -key {{ .Values.service.tls.client_key_pem_path }} | grep imok'] {{- end }} - initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} - successThreshold: {{ .Values.readinessProbe.successThreshold }} - failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + {{- if not .Values.service.disableBaseClientPort }} + port: client + {{- else }} + port: follower + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} {{- end }} volumeMounts: + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh - name: data mountPath: /bitnami/zookeeper {{- if .Values.dataLogDir }} - name: data-log mountPath: {{ .Values.dataLogDir }} {{- end }} - {{- if .Values.config }} + {{- if or .Values.configuration .Values.existingConfigmap }} - name: config mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg subPath: zoo.cfg {{- end }} - {{- if .Values.extraVolumeMounts }} - {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- if .Values.tls.client.enabled }} + - name: 
client-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/client + readOnly: true {{- end }} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-shared-certs + mountPath: /opt/bitnami/zookeeper/config/certs/quorum + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sidecars "context" $ ) | nindent 8 }} + {{- end }} volumes: - {{- if .Values.config }} + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + defaultMode: 0755 + {{- if or .Values.configuration .Values.existingConfigmap }} - name: config configMap: - name: {{ template "zookeeper.fullname" . }} + name: {{ include "zookeeper.configmapName" . }} {{- end }} {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} - name: data @@ -292,17 +464,35 @@ spec: - name: data-log emptyDir: {} {{- end }} - {{- if .Values.extraVolumes }} - {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- if .Values.tls.client.enabled }} + - name: client-certificates + secret: + secretName: {{ include "zookeeper.client.tlsSecretName" . }} + defaultMode: 256 + - name: client-shared-certs + emptyDir: {} {{- end }} - {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) )}} + {{- if .Values.tls.quorum.enabled }} + - name: quorum-certificates + secret: + secretName: {{ include "zookeeper.quorum.tlsSecretName" . }} + defaultMode: 256 + - name: quorum-shared-certs + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) ) }} volumeClaimTemplates: - {{- if not .Values.persistence.existingClaim }} + {{- if not .Values.persistence.existingClaim }} - metadata: name: data - annotations: - {{- range $key, $value := .Values.persistence.annotations }} - {{ $key }}: {{ $value }} + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} {{- end }} spec: accessModes: @@ -312,14 +502,19 @@ spec: resources: requests: storage: {{ .Values.persistence.size | quote }} - {{- include "zookeeper.storageClass" . 
| nindent 8 }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} {{- end }} {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} - metadata: name: data-log - annotations: - {{- range $key, $value := .Values.persistence.annotations }} - {{ $key }}: {{ $value }} + {{- if .Values.persistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} {{- end }} spec: accessModes: @@ -329,6 +524,9 @@ spec: resources: requests: storage: {{ .Values.persistence.dataLogDir.size | quote }} - {{- include "zookeeper.storageClass" . | nindent 8 }} + {{- include "common.storage.class" (dict "persistence" .Values.persistence "global" .Values.global) | nindent 8 }} + {{- if .Values.persistence.dataLogDir.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.dataLogDir.selector "context" $) | nindent 10 }} + {{- end }} {{- end }} {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml old mode 100755 new mode 100644 index 972efb51d..e7ab496cf --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -1,42 +1,42 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "zookeeper.fullname" . }}-headless - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} + name: {{ printf "%s-%s" (include "common.names.fullname" .) (default "headless" .Values.service.headless.servicenameOverride) | trunc 63 | trimSuffix "-" }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} app.kubernetes.io/component: zookeeper {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} - {{- if or .Values.commonAnnotations .Values.service.annotations }} + {{- if or .Values.commonAnnotations .Values.service.headless.annotations }} annotations: {{- if .Values.service.headless.annotations }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }}\ + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} {{- end }} spec: type: ClusterIP clusterIP: None - publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }} + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} ports: - {{ if not .Values.service.tls.disable_base_client_port }} + {{- if not .Values.service.disableBaseClientPort }} - name: tcp-client - port: 2181 + port: {{ .Values.service.ports.client }} targetPort: client - {{ end }} - {{ if .Values.service.tls.client_enable }} + {{- end }} + {{- if .Values.tls.client.enabled }} - name: tcp-client-tls - port: {{ .Values.service.tls.client_port }} + port: {{ .Values.service.ports.tls }} targetPort: client-tls - {{ end }} - - name: follower - port: 2888 + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} targetPort: follower - name: tcp-election - port: 3888 + port: {{ .Values.service.ports.election }} targetPort: election - selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} app.kubernetes.io/component: zookeeper diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml old mode 100755 new mode 100644 index da3a2895a..6ad0b1096 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml @@ -1,40 +1,71 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "zookeeper.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: {{- include "zookeeper.labels" . | nindent 4 }} + name: {{ template "common.names.fullname" . }} + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} app.kubernetes.io/component: zookeeper {{- if .Values.commonLabels }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if or .Values.commonAnnotations .Values.service.annotations }} annotations: {{- if .Values.service.annotations }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}\ + {{- include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} {{- end }} spec: type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} ports: - {{ if not .Values.service.tls.disable_base_client_port }} + {{- if not .Values.service.disableBaseClientPort }} - name: tcp-client - port: 2181 + port: {{ .Values.service.ports.client }} targetPort: client - {{ end }} - {{ if .Values.service.tls.client_enable }} + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.tls.client.enabled }} - name: tcp-client-tls - port: {{ .Values.service.tls.client_port }} + port: {{ .Values.service.ports.tls }} targetPort: client-tls - {{ end }} - - name: follower - port: 2888 + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tls)) }} + nodePort: {{ .Values.service.nodePorts.tls }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp-follower + port: {{ .Values.service.ports.follower }} targetPort: follower - name: tcp-election - port: 3888 + port: {{ .Values.service.ports.election }} targetPort: election - selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . 
| nindent 4 }} app.kubernetes.io/component: zookeeper diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml new file mode 100644 index 000000000..a07480d55 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/tls-secrets.yaml @@ -0,0 +1,55 @@ +{{- if (include "zookeeper.client.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-client-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) "127.0.0.1" "localhost" $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-client-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} +{{- if (include "zookeeper.quorum.createTlsSecret" .) }} +{{- $ca := genCA "zookeeper-quorum-ca" 365 }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $serviceName := include "common.names.fullname" . }} +{{- $headlessServiceName := printf "%s-headless" (include "common.names.fullname" .) }} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $headlessServiceName $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $serviceName $releaseNamespace $clusterDomain) $fullname }} +{{- $crt := genSignedCert $fullname nil $altNames 365 $ca }} +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }}-quorum-crt + namespace: {{ template "zookeeper.namespace" . }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + ca.crt: {{ $ca.Cert | b64enc | quote }} + tls.crt: {{ $crt.Cert | b64enc | quote }} + tls.key: {{ $crt.Key | b64enc | quote }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml deleted file mode 100755 index 7d678603f..000000000 --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml +++ /dev/null @@ -1,430 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - -## Bitnami Zookeeper image version -## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ -## -image: - registry: docker.io - repository: bitnami/zookeeper - tag: 3.6.2-debian-10-r10 - - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - ## Set to true if you would like to see extra information on logs - ## It turns BASH and NAMI debugging in minideb - ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging - ## - debug: false - -## String to partially override zookeeper.fullname template (will maintain the release name) -# nameOverride: - -## String to fully override zookeeper.fullname template -# fullnameOverride: - -## Kubernetes Cluster Domain -## -clusterDomain: cluster.local - -## Add labels to all the deployed resources -## -commonLabels: {} - -## Add annotations to all the deployed resources -## -commonAnnotations: {} - -## Init containers parameters: -## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistryKeySecretName - resources: {} - -## extraVolumes and extraVolumeMounts allows you to mount other volumes -## Example Use Cases: -## mount certificates to enable tls -# extraVolumes: -# - name: zookeeper-keystore -# secret: -# defaultMode: 288 -# secretName: zookeeper-keystore -# - name: zookeeper-trustsore -# secret: -# defaultMode: 288 -# secretName: zookeeper-truststore -# extraVolumeMounts: -# - name: zookeeper-keystore -# mountPath: /certs/keystore -# readOnly: true -# - name: zookeeper-truststore -# mountPath: /certs/truststore -# readOnly: true - - -## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete -## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets -## -updateStrategy: RollingUpdate - -## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions -## The PDB will only be created if replicaCount is greater than 1 -## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions -## -podDisruptionBudget: - maxUnavailable: 1 - -## Partition update strategy -## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions -## -# rollingUpdatePartition: - -## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel -## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy -## -podManagementPolicy: Parallel - -## Number of ZooKeeper nodes -## -replicaCount: 3 - -## Basic time unit in milliseconds used by ZooKeeper for heartbeats -## -tickTime: 2000 - -## ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader -## -initLimit: 10 - -## How far out of date a server can be from a leader -## -syncLimit: 5 - -## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble -## -maxClientCnxns: 60 - -## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime. -## -maxSessionTimeout: 40000 - -## A list of comma separated Four Letter Words commands to use -## -fourlwCommandsWhitelist: srvr, mntr, ruok - -## Allow zookeeper to listen for peers on all IPs -## -listenOnAllIPs: false - -## Allow to accept connections from unauthenticated users -## -allowAnonymousLogin: true - -autopurge: - ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest - ## - snapRetainCount: 3 - ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging. - ## - purgeInterval: 0 - -auth: - ## Use existing secret (ignores previous password) - ## - # existingSecret: - ## Enable Zookeeper auth. It uses SASL/Digest-MD5 - ## - enabled: false - ## User that will use Zookeeper clients to auth - ## - clientUser: - ## Password that will use Zookeeper clients to auth - ## - clientPassword: - ## Comma, semicolon or whitespace separated list of user to be created. 
Specify them as a string, for example: "user1,user2,admin" - ## - serverUsers: - ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" - ## - serverPasswords: - -## Size in MB for the Java Heap options (Xmx and XMs). This env var is ignored if Xmx an Xms are configured via JVMFLAGS -## -heapSize: 1024 - -## Log level for the Zookeeper server. ERROR by default. Have in mind if you set it to INFO or WARN the ReadinessProve will produce a lot of logs. -## -logLevel: ERROR - -## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir. -## This allows a dedicated log device to be used, and helps avoid competition between logging and snaphots. -## Example: -## dataLogDir: /bitnami/zookeeper/dataLog -## -dataLogDir: "" - -## Default JVMFLAGS for the ZooKeeper process -## -# jvmFlags: - -## Configure ZooKeeper with a custom zoo.cfg file -## -# config: - -## Kubernetes configuration -## For minikube, set this to NodePort, elsewhere use LoadBalancer -## -service: - type: ClusterIP - port: 2181 - followerPort: 2888 - electionPort: 3888 - publishNotReadyAddresses: true - tls: - client_enable: true - quorum_enable: true - disable_base_client_port: true - - client_port: 3181 - - client_keystore_path: /tls_key_store/key_store_file - client_keystore_password: "" - client_truststore_path: /tls_trust_store/trust_store_file - client_truststore_password: "" - - quorum_keystore_path: /tls_key_store/key_store_file - quorum_keystore_password: "" - quorum_truststore_path: /tls_trust_store/trust_store_file - quorum_truststore_password: "" - annotations: {} - headless: - annotations: {} - -## Service account for Zookeeper to use. -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: false - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the zookeeper.fullname template - # name: - -## Zookeeper Pod Security Context -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Zookeeper data Persistent Volume Storage Class -## If defined, storageClassName: -## If set to "-", storageClassName: "", which disables dynamic provisioning -## If undefined (the default) or set to null, no storageClassName spec is -## set, choosing the default provisioner. 
(gp2 on AWS, standard on -## GKE, AWS & OpenStack) -## -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template - ## - # existingClaim: - - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - annotations: {} - dataLogDir: - size: 8Gi - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template - ## - # existingClaim: - -## Node labels for pod assignment -## Ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} - -## Tolerations for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Labels -## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -podLabels: {} - -## Annotations -## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -podAnnotations: {} - -## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand -## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: "" - -## Affinity for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Scheduler name -## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: stork - -## Configure resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - requests: - memory: 256Mi - cpu: 250m - -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) -## -livenessProbe: - enabled: true - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - probeCommandTimeout: 2 - -readinessProbe: - enabled: true - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - failureThreshold: 6 - successThreshold: 1 - probeCommandTimeout: 2 - -## Network policies -## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ -## -networkPolicy: - ## Specifies whether a NetworkPolicy should be created - ## - enabled: true - - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to the port Redis is listening - ## on. When true, zookeeper accept connections from any source - ## (with the correct destination port). 
- ## - allowExternal: true - -## Zookeeper Prometheus Exporter configuration -## -metrics: - enabled: false - - ## Zookeeper Prometheus Exporter container port - ## - containerPort: 9141 - - ## Service configuration - ## - service: - ## Zookeeper Prometheus Exporter service type - ## - type: ClusterIP - ## Zookeeper Prometheus Exporter service port - ## - port: 9141 - ## Annotations for the Zookeeper Prometheus Exporter metrics service - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.service.port }}" - prometheus.io/path: "/metrics" - - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - enabled: false - ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) - ## - namespace: - - ## Interval at which metrics should be scraped. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # interval: 10s - - ## Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # scrapeTimeout: 10s - - ## ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration - ## - # selector: - # prometheus: my-prometheus - - ## Prometheus Operator PrometheusRule configuration - ## - prometheusRule: - enabled: false - ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace) - ## - namespace: - - ## PrometheusRule selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration - ## - # selector: - # prometheus: my-prometheus - - ## Some example rules. - rules: [] - # - alert: ZookeeperSyncedFollowers - # annotations: - # message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). - # expr: max(synced_followers{service="my-release-metrics"}) < 2 - # for: 5m - # labels: - # severity: critical - # - alert: ZookeeperOutstandingRequests - # annotations: - # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. 
- # expr: outstanding_requests{service="my-release-metrics"} > 10 - # for: 5m - # labels: - # severity: critical diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml old mode 100755 new mode 100644 index a40decb54..b9d59000d --- a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml @@ -1,320 +1,262 @@ +## @section Global parameters ## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass ## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass -## Bitnami Zookeeper image version +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: cluster.local +## @param extraDeploy Extra objects to deploy (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param namespaceOverride Override namespace for ZooKeeper resources +## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent +## +namespaceOverride: "" + +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section ZooKeeper chart parameters + +## Bitnami ZooKeeper image version ## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## @param image.registry ZooKeeper image registry +## @param image.repository ZooKeeper image repository +## @param image.tag ZooKeeper image tag (immutable tags are recommended) +## @param image.digest ZooKeeper image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy ZooKeeper image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set ## image: registry: docker.io repository: bitnami/zookeeper - tag: 3.6.2-debian-10-r10 - + tag: 3.8.0-debian-11-r74 + digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - # pullSecrets: - # - myRegistryKeySecretName + pullSecrets: [] ## Set to true if you would like to see extra information on logs - ## It turns BASH and NAMI debugging in minideb - ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging ## debug: false - -## String to partially override zookeeper.fullname template (will maintain the release name) -# nameOverride: - -## String to fully override zookeeper.fullname template -# fullnameOverride: - -## Kubernetes Cluster Domain +## Authentication parameters ## -clusterDomain: cluster.local - -## Add labels to all the deployed resources -## -commonLabels: {} - -## Add annotations to all the deployed resources -## -commonAnnotations: {} - -## Init containers parameters: -## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. -## -volumePermissions: - enabled: false - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +auth: + client: + ## @param auth.client.enabled Enable ZooKeeper client-server authentication. It uses SASL/Digest-MD5 ## - # pullSecrets: - # - myRegistryKeySecretName - resources: {} - -## extraVolumes and extraVolumeMounts allows you to mount other volumes -## Example Use Cases: -## mount certificates to enable tls -# extraVolumes: -# - name: zookeeper-keystore -# secret: -# defaultMode: 288 -# secretName: zookeeper-keystore -# - name: zookeeper-trustsore -# secret: -# defaultMode: 288 -# secretName: zookeeper-truststore -# extraVolumeMounts: -# - name: zookeeper-keystore -# mountPath: /certs/keystore -# readOnly: true -# - name: zookeeper-truststore -# mountPath: /certs/truststore -# readOnly: true - -## StatefulSet controller supports automated updates. 
There are two valid update strategies: RollingUpdate and OnDelete -## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets -## -updateStrategy: RollingUpdate - -## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions -## The PDB will only be created if replicaCount is greater than 1 -## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions -## -podDisruptionBudget: - maxUnavailable: 1 - -## Partition update strategy -## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions -## -# rollingUpdatePartition: - -## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel -## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy -## -podManagementPolicy: Parallel - -## Number of ZooKeeper nodes -## -replicaCount: 1 - -## Basic time unit in milliseconds used by ZooKeeper for heartbeats + enabled: false + ## @param auth.client.clientUser User that ZooKeeper clients will use to authenticate + ## + clientUser: "" + ## @param auth.client.clientPassword Password that ZooKeeper clients will use to authenticate + ## + clientPassword: "" + ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created + ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.client.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" + quorum: + ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## Note: Make sure the user is included in auth.quorum.serverUsers + ## + learnerUser: "" + ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## + learnerPassword: "" + ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers.
+ ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" +## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats ## tickTime: 2000 - -## ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## @param initLimit Limits the length of time the ZooKeeper servers in quorum have to connect to a leader ## initLimit: 10 - -## How far out of date a server can be from a leader +## @param syncLimit How far out of date a server can be from a leader ## syncLimit: 5 - -## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## @param preAllocSize Block size for transaction log file +## +preAllocSize: 65536 +## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) +## +snapCount: 100000 +## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble ## maxClientCnxns: 60 - -## A list of comma separated Four Letter Words commands to use -## -fourlwCommandsWhitelist: srvr, mntr, ruok - -## Allow zookeeper to listen for peers on all IPs -## -listenOnAllIPs: false - -## Allow to accept connections from unauthenticated users -## -allowAnonymousLogin: true - -autopurge: - ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest - ## - snapRetainCount: 3 - ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging. - ## - purgeInterval: 0 - -## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime. +## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate +## Defaults to 20 times the tickTime ## maxSessionTimeout: 40000 - -auth: - ## Use existing secret (ignores previous password) - ## - # existingSecret: - ## Enable Zookeeper auth. It uses SASL/Digest-MD5 - ## - enabled: false - ## User that will use Zookeeper clients to auth - ## - clientUser: - ## Password that will use Zookeeper clients to auth - ## - clientPassword: - ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" - ## - serverUsers: - ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" - ## - serverPasswords: - -## Size in MB for the Java Heap options (Xmx and XMs). This env var is ignored if Xmx an Xms are configured via JVMFLAGS +## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms) +## This env var is ignored if Xmx and Xms are configured via `jvmFlags` ## heapSize: 1024 - -## Log level for the Zookeeper server. ERROR by default. Have in mind if you set it to INFO or WARN the ReadinessProve will produce a lot of logs.
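The `minServerId` parameter introduced just below pairs with the statefulset template earlier in this patch, which now appends `::{{ add $e $minServerId }}` to every `ZOO_SERVERS` entry. As a rough sketch of the rendered result, assuming a release named `zookeeper` in the `default` namespace with `replicaCount=3`, the default ports and `minServerId=1` (hostnames are illustrative, not chart defaults; the real value is a single space-separated string, wrapped here for readability):

    ZOO_SERVERS: >-
      zookeeper-0.zookeeper-headless.default.svc.cluster.local:2888:3888::1
      zookeeper-1.zookeeper-headless.default.svc.cluster.local:2888:3888::2
      zookeeper-2.zookeeper-headless.default.svc.cluster.local:2888:3888::3

With `minServerId=10` the suffixes would instead be `::10`, `::11` and `::12`, matching the server IDs the pods derive from their ordinals.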
+## @param fourlwCommandsWhitelist A list of comma separated Four Letter Words commands that can be executed +## +fourlwCommandsWhitelist: srvr, mntr, ruok +## @param minServerId Minimal SERVER_ID value; nodes increment their IDs accordingly +## Servers increment their ID starting at this minimal value. +## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively. +## +minServerId: 1 +## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses +## +listenOnAllIPs: false +## Ongoing data directory cleanup configuration +## +autopurge: + ## @param autopurge.snapRetainCount Number of the most recent snapshots (and corresponding transaction logs) to retain + ## + snapRetainCount: 3 + ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered + ## Set to a positive integer to enable the auto purging + ## + purgeInterval: 0 +## @param logLevel Log level for the ZooKeeper server. ERROR by default +## Bear in mind that setting it to INFO or WARN will make the readinessProbe produce a lot of logs ## logLevel: ERROR - -## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir. -## This allows a dedicated log device to be used, and helps avoid competition between logging and snaphots. -## Example: +## @param jvmFlags Default JVM flags for the ZooKeeper process +## +jvmFlags: "" +## @param dataLogDir Dedicated data log directory +## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots. +## E.g. ## dataLogDir: /bitnami/zookeeper/dataLog ## dataLogDir: "" - -## Default JVMFLAGS for the ZooKeeper process +## @param configuration Configure ZooKeeper with a custom zoo.cfg file +## e.g.: +## configuration: |- +## tickTime=2000 +## initLimit=10 +## ... ## -# jvmFlags: - -## Configure ZooKeeper with a custom zoo.cfg file +configuration: "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper +## NOTE: When it's set, the `configuration` parameter is ignored ## -# config: - -## Kubernetes configuration -## For minikube, set this to NodePort, elsewhere use LoadBalancer +existingConfigmap: "" +## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes +## e.g.: +## extraEnvVars: +## - name: FOO +## value: "bar" ## -service: - type: ClusterIP - port: 2181 - followerPort: 2888 - electionPort: 3888 - publishNotReadyAddresses: true - tls: - client_enable: false - quorum_enable: false - disable_base_client_port: false - - client_port: 3181 - - client_keystore_path: /tls_key_store/key_store_file - client_keystore_password: "" - client_truststore_path: /tls_trust_store/trust_store_file - client_truststore_password: "" - - quorum_keystore_path: /tls_key_store/key_store_file - quorum_keystore_password: "" - quorum_truststore_path: /tls_trust_store/trust_store_file - quorum_truststore_password: "" - annotations: {} - headless: - annotations: {} - -## Service account for Zookeeper to use. -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes ## -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: false - ## The name of the ServiceAccount to use.
- ## If not set and create is true, a name is generated using the zookeeper.fullname template - # name: - -## Zookeeper Pod Security Context -securityContext: - enabled: true - fsGroup: 1001 - runAsUser: 1001 - -## Zookeeper data Persistent Volume Storage Class -## If defined, storageClassName: -## If set to "-", storageClassName: "", which disables dynamic provisioning -## If undefined (the default) or set to null, no storageClassName spec is -## set, choosing the default provisioner. (gp2 on AWS, standard on -## GKE, AWS & OpenStack) +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes ## -persistence: - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template - ## - # existingClaim: - - enabled: true - # storageClass: "-" - accessModes: - - ReadWriteOnce - size: 8Gi - annotations: {} - dataLogDir: - size: 8Gi - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template - ## - # existingClaim: - - -## Node labels for pod assignment -## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +extraEnvVarsSecret: "" +## @param command Override default container command (useful when using custom images) ## -nodeSelector: {} - -## Tolerations for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) ## -tolerations: [] +args: [] -## Labels -## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -podLabels: {} +## @section Statefulset parameters -## Annotations -## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -podAnnotations: {} - -## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand -## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: "" - -## Affinity for pod assignment -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## @param replicaCount Number of ZooKeeper nodes ## -affinity: {} - -## Scheduler name -## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +replicaCount: 1 +## @param containerPorts.client ZooKeeper client container port +## @param containerPorts.tls ZooKeeper TLS container port +## @param containerPorts.follower ZooKeeper follower container port +## @param containerPorts.election ZooKeeper election container port ## -# schedulerName: stork - -## Configure resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - requests: - memory: 256Mi - cpu: 250m - -## Configure extra options for liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +containerPorts: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 +## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers +## @param 
livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe
+## @param livenessProbe.periodSeconds Period seconds for livenessProbe
+## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe
+## @param livenessProbe.failureThreshold Failure threshold for livenessProbe
+## @param livenessProbe.successThreshold Success threshold for livenessProbe
+## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe
##
livenessProbe:
  enabled: true
@@ -324,7 +266,14 @@ livenessProbe:
  failureThreshold: 6
  successThreshold: 1
  probeCommandTimeout: 2
-
+## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers
+## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe
+## @param readinessProbe.periodSeconds Period seconds for readinessProbe
+## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe
+## @param readinessProbe.failureThreshold Failure threshold for readinessProbe
+## @param readinessProbe.successThreshold Success threshold for readinessProbe
+## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe
+##
readinessProbe:
  enabled: true
  initialDelaySeconds: 5
@@ -333,98 +282,598 @@ readinessProbe:
  failureThreshold: 6
  successThreshold: 1
  probeCommandTimeout: 2
+## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers
+## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe
+## @param startupProbe.periodSeconds Period seconds for startupProbe
+## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe
+## @param startupProbe.failureThreshold Failure threshold for startupProbe
+## @param startupProbe.successThreshold Success threshold for startupProbe
+##
+startupProbe:
+  enabled: false
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 1
+  failureThreshold: 15
+  successThreshold: 1
+## @param customLivenessProbe Custom livenessProbe that overrides the default one
+##
+customLivenessProbe: {}
+## @param customReadinessProbe Custom readinessProbe that overrides the default one
+##
+customReadinessProbe: {}
+## @param customStartupProbe Custom startupProbe that overrides the default one
+##
+customStartupProbe: {}
+## @param lifecycleHooks for the ZooKeeper container(s) to automate configuration before or after startup
+##
+lifecycleHooks: {}
+## ZooKeeper resource requests and limits
+## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resource limits for the ZooKeeper containers
+## @param resources.requests.memory The requested memory for the ZooKeeper containers
+## @param resources.requests.cpu The requested cpu for the ZooKeeper containers
+##
+resources:
+  limits: {}
+  requests:
+    memory: 256Mi
+    cpu: 250m
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enable ZooKeeper pods' Security Context
+## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enable ZooKeeper containers' Security Context
+## @param containerSecurityContext.runAsUser Set ZooKeeper containers' Security Context runAsUser
+## @param 
containerSecurityContext.runAsNonRoot Set ZooKeeper containers' Security Context runAsNonRoot
+## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to run as non-privileged
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  allowPrivilegeEscalation: false
+## @param hostAliases ZooKeeper pods host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param podLabels Extra labels for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Annotations for ZooKeeper pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+  ##
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## key: "kubernetes.io/e2e-az-name"
+  ##
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+## @param podManagementPolicy StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees.
There are two valid pod management policies: `OrderedReady` and `Parallel` +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Kubernetes pod scheduler registry +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type ZooKeeper statefulset strategy type +## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumes: +## - name: zookeeper-keystore +## secret: +## defaultMode: 288 +## secretName: zookeeper-keystore +## - name: zookeeper-truststore +## secret: +## defaultMode: 288 +## secretName: zookeeper-truststore +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumeMounts: +## - name: zookeeper-keystore +## mountPath: /certs/keystore +## readOnly: true +## - name: zookeeper-truststore +## mountPath: /certs/truststore +## readOnly: true +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the ZooKeeper pod(s) +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## ZooKeeper Pod Disruption Budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param pdb.create Deploy a pdb object for the ZooKeeper pod +## @param pdb.minAvailable Minimum available ZooKeeper replicas +## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 +## @section Traffic Exposure parameters + +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client ZooKeeper client service port + ## @param service.ports.tls ZooKeeper TLS service port + ## @param service.ports.follower ZooKeeper follower service port + ## @param service.ports.election ZooKeeper election service port + ## + ports: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param service.nodePorts.client Node port for clients + ## @param service.nodePorts.tls Node port for TLS + ## + nodePorts: + client: "" + tls: "" + ## @param service.disableBaseClientPort Remove client port from service definitions. 
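+  ## A minimal sketch (not from the chart docs): with TLS enabled you may want to expose only the
+  ## TLS port, e.g.:
+  ## service:
+  ##   disableBaseClientPort: true
+  ## tls:
+  ##   client:
+  ##     enabled: true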
+  ##
+  disableBaseClientPort: false
+  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
+  ## Values: ClientIP or None
+  ## ref: https://kubernetes.io/docs/user-guide/services/
+  ##
+  sessionAffinity: None
+  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
+  ## sessionAffinityConfig:
+  ##   clientIP:
+  ##     timeoutSeconds: 300
+  ##
+  sessionAffinityConfig: {}
+  ## @param service.clusterIP ZooKeeper service Cluster IP
+  ## e.g.:
+  ## clusterIP: None
+  ##
+  clusterIP: ""
+  ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP
+  ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer
+  ##
+  loadBalancerIP: ""
+  ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources
+  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ## e.g:
+  ## loadBalancerSourceRanges:
+  ##   - 10.10.10.0/24
+  ##
+  loadBalancerSourceRanges: []
+  ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy
+  ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
+  ##
+  externalTrafficPolicy: Cluster
+  ## @param service.annotations Additional custom annotations for ZooKeeper service
+  ##
+  annotations: {}
+  ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value)
+  ##
+  extraPorts: []
+  ## @param service.headless.annotations Annotations for the Headless Service
+  ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods
+  ## @param service.headless.servicenameOverride String to partially override headless service name
+  ##
+  headless:
+    publishNotReadyAddresses: true
+    annotations: {}
+    servicenameOverride: ""
## Network policies
## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
##
networkPolicy:
-  ## Specifies whether a NetworkPolicy should be created
+  ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created
  ##
  enabled: false
-
-  ## The Policy model to apply. When set to false, only pods with the correct
-  ## client label will have network access to the port Redis is listening
-  ## on. When true, zookeeper accept connections from any source
-  ## (with the correct destination port).
+  ## @param networkPolicy.allowExternal Don't require client label for connections
+  ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is
+  ## listening on. When true, ZooKeeper accepts connections from any source (with the correct destination port).
  ##
-  # allowExternal: true
+  allowExternal: true

-## Zookeeper Prometheus Exporter configuration
+## @section Other Parameters
+
+## Service account for ZooKeeper to use.
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod
+  ##
+  create: false
+  ## @param serviceAccount.name The name of the ServiceAccount to use.
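+  ## For example (illustrative name only), to reuse a ServiceAccount you manage yourself:
+  ## serviceAccount:
+  ##   create: false
+  ##   name: "my-zookeeper-sa"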
+ ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir + ## + enabled: true + ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica) + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for ZooKeeper data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.labels Labels for the PVC + ## + labels: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## Persistence for a dedicated data log directory + ## + dataLogDir: + ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory + ## + size: 8Gi + ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag
+  ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy
+  ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/bitnami-shell
+    tag: 11-debian-11-r69
+    digest: ""
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init container resource requests and limits
+  ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+  ## @param volumePermissions.resources.limits Init container volume-permissions resource limits
+  ## @param volumePermissions.resources.requests Init container volume-permissions resource requests
+  ##
+  resources:
+    limits: {}
+    requests: {}
+  ## Init container's Security Context
+  ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser
+  ## and not the below volumePermissions.containerSecurityContext.runAsUser
+  ## @param volumePermissions.containerSecurityContext.enabled Enable init container Security Context
+  ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container
+  ##
+  containerSecurityContext:
+    enabled: true
+    runAsUser: 0
+
+## @section Metrics parameters
+##
+
+## ZooKeeper Prometheus Exporter configuration
##
metrics:
+  ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint
+  ##
  enabled: false
-
-  ## Zookeeper Prometheus Exporter container port
+  ## @param metrics.containerPort ZooKeeper Prometheus Exporter container port
  ##
  containerPort: 9141
-
  ## Service configuration
  ##
  service:
-    ## Zookeeper Prometheus Exporter service type
+    ## @param metrics.service.type ZooKeeper Prometheus Exporter service type
    ##
    type: ClusterIP
-    ## Zookeeper Prometheus Exporter service port
+    ## @param metrics.service.port ZooKeeper Prometheus Exporter service port
    ##
    port: 9141
-    ## Annotations for the Zookeeper Prometheus Exporter metrics service
+    ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint
    ##
    annotations:
      prometheus.io/scrape: "true"
      prometheus.io/port: "{{ .Values.metrics.service.port }}"
      prometheus.io/path: "/metrics"
-
  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
+    ## @param metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator
+    ##
    enabled: false
-    ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+    ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
    ##
-    namespace:
-
-    ## Interval at which metrics should be scraped.
+    namespace: ""
+    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
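+    ## e.g. (a sketch; any Prometheus duration string is accepted here):
+    ## interval: "30s"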
## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
-    # interval: 10s
-
-    ## Timeout after which the scrape is ended
+    interval: ""
+    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
-    # scrapeTimeout: 10s
-
-    ## ServiceMonitor selector labels
-    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    scrapeTimeout: ""
+    ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus
    ##
-    # selector:
-    #   prometheus: my-prometheus
-
+    additionalLabels: {}
+    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
+    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    selector: {}
+    ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping
+    ##
+    relabelings: []
+    ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion
+    ##
+    metricRelabelings: []
+    ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint
+    ##
+    honorLabels: false
+    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus.
+    ##
+    jobLabel: ""
  ## Prometheus Operator PrometheusRule configuration
  ##
  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
    enabled: false
-    ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
    ##
-    namespace:
-
-    ## PrometheusRule selector labels
-    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    namespace: ""
+    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ##   - alert: ZooKeeperSyncedFollowers
+    ##     annotations:
+    ##       message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+    ##     expr: max(synced_followers{service="my-release-metrics"}) < 2
+    ##     for: 5m
+    ##     labels:
+    ##       severity: critical
+    ##   - alert: ZooKeeperOutstandingRequests
+    ##     annotations:
+    ##       message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+    ##     expr: outstanding_requests{service="my-release-metrics"} > 10
+    ##     for: 5m
+    ##     labels:
+    ##       severity: critical
    ##
-    # selector:
-    #   prometheus: my-prometheus
-
-    ## Some example rules.
    rules: []
-    # - alert: ZookeeperSyncedFollowers
-    #   annotations:
-    #     message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
- # expr: max(synced_followers{service="my-release-metrics"}) < 2 - # for: 5m - # labels: - # severity: critical - # - alert: ZookeeperOutstandingRequests - # annotations: - # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. - # expr: outstanding_requests{service="my-release-metrics"} > 10 - # for: 5m - # labels: - # severity: critical + +## @section TLS/SSL parameters +## + +## Enable SSL/TLS encryption +## +tls: + client: + ## @param tls.client.enabled Enable TLS for client connections + ## + enabled: false + ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need". + ## + auth: "none" + ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications + ## Currently only supports PEM certificates + ## + autoGenerated: false + ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications + ## + existingSecret: "" + ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks + ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks + ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: "" + ## @param tls.client.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.client.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + quorum: + ## @param tls.quorum.enabled Enable TLS for quorum protocol + ## + enabled: false + ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need". + ## + auth: "none" + ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol + ## + existingSecret: "" + ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore. 
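+    ## For example (illustrative secret and key names, assuming a secret you created yourself):
+    ## existingSecret: "zookeeper-quorum-certs"
+    ## existingSecretTruststoreKey: "zookeeper.truststore.jks"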
+ ## + existingSecretTruststoreKey: "" + ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks + ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks + ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: "" + ## @param tls.quorum.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.quorum.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param tls.resources.limits The resources limits for the TLS init container + ## @param tls.resources.requests The requested resources for the TLS init container + ## + resources: + limits: {} + requests: {} diff --git a/scripts/helmcharts/databases/charts/kafka/files/jks/README.md b/scripts/helmcharts/databases/charts/kafka/files/jks/README.md deleted file mode 100755 index e110a8825..000000000 --- a/scripts/helmcharts/databases/charts/kafka/files/jks/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Java Key Stores - -You can copy here your Java Key Stores (JKS) files so a secret is created including them. Remember to use a truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. For instance, if you have 3 brokers you need to copy here the following files: - -- kafka.truststore.jks -- kafka-0.keystore.jks -- kafka-1.keystore.jks -- kafka-2.keystore.jks - -Find more info in [this section](https://github.com/bitnami/charts/tree/master/bitnami/kafka#enable-security-for-kafka-and-zookeeper) of the README.md file. 
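Note on the removed files/jks mechanism above: an equivalent flow (a sketch with illustrative
secret and file names, not part of this chart's docs) is to create the JKS secret out of band and
reference it via `auth.tls.existingSecret` in values.yaml, which the updated NOTES.txt below reads:

    # one keystore per broker plus a shared truststore, matching the layout described above
    kubectl create secret generic kafka-jks \
      --from-file=kafka.truststore.jks=./kafka.truststore.jks \
      --from-file=kafka-0.keystore.jks=./kafka-0.keystore.jks \
      --from-file=kafka-1.keystore.jks=./kafka-1.keystore.jks \
      --from-file=kafka-2.keystore.jks=./kafka-2.keystore.jks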
diff --git a/scripts/helmcharts/databases/charts/kafka/kafka.yaml b/scripts/helmcharts/databases/charts/kafka/kafka.yaml deleted file mode 100644 index acd718957..000000000 --- a/scripts/helmcharts/databases/charts/kafka/kafka.yaml +++ /dev/null @@ -1,521 +0,0 @@ ---- -# Source: kafka/templates/serviceaccount.yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kafka - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: kafka ---- -# Source: kafka/templates/scripts-configmap.yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: kafka-scripts - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm -data: - setup.sh: |- - #!/bin/bash - - ID="${MY_POD_NAME#"kafka-"}" - export KAFKA_CFG_BROKER_ID="$ID" - - exec /entrypoint.sh /run.sh ---- -# Source: kafka/charts/zookeeper/templates/svc-headless.yaml -apiVersion: v1 -kind: Service -metadata: - name: kafka-zookeeper-headless - namespace: db - labels: - app.kubernetes.io/name: zookeeper - helm.sh/chart: zookeeper-5.21.9 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: zookeeper -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - - name: tcp-client - port: 2181 - targetPort: client - - - - name: follower - port: 2888 - targetPort: follower - - name: tcp-election - port: 3888 - targetPort: election - selector: - app.kubernetes.io/name: zookeeper - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: zookeeper ---- -# Source: kafka/charts/zookeeper/templates/svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: kafka-zookeeper - namespace: db - labels: - app.kubernetes.io/name: zookeeper - helm.sh/chart: zookeeper-5.21.9 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: zookeeper -spec: - type: ClusterIP - ports: - - - name: tcp-client - port: 2181 - targetPort: client - - - - name: follower - port: 2888 - targetPort: follower - - name: tcp-election - port: 3888 - targetPort: election - selector: - app.kubernetes.io/name: zookeeper - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: zookeeper ---- -# Source: kafka/templates/kafka-metrics-svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: kafka-metrics - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: metrics - annotations: - - prometheus.io/path: /metrics - prometheus.io/port: '9308' - prometheus.io/scrape: "true" -spec: - type: ClusterIP - ports: - - name: http-metrics - port: 9308 - protocol: TCP - targetPort: metrics - nodePort: null - selector: - app.kubernetes.io/name: kafka - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: metrics ---- -# Source: kafka/templates/svc-headless.yaml -apiVersion: v1 -kind: Service -metadata: - name: kafka-headless - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: kafka -spec: - type: ClusterIP - clusterIP: None - ports: - - name: tcp-client - port: 9092 - protocol: TCP - targetPort: kafka-client - - name: tcp-internal - port: 9093 - protocol: TCP - targetPort: kafka-internal - selector: - 
app.kubernetes.io/name: kafka - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: kafka ---- -# Source: kafka/templates/svc.yaml -apiVersion: v1 -kind: Service -metadata: - name: kafka - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: kafka -spec: - type: ClusterIP - ports: - - name: tcp-client - port: 9092 - protocol: TCP - targetPort: kafka-client - nodePort: null - selector: - app.kubernetes.io/name: kafka - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: kafka ---- -# Source: kafka/templates/kafka-metrics-deployment.yaml -apiVersion: apps/v1 -kind: Deployment -metadata: - name: kafka-exporter - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: metrics -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: kafka - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: metrics - template: - metadata: - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: metrics - spec: - containers: - - name: kafka-exporter - image: docker.io/bitnami/kafka-exporter:1.2.0-debian-10-r220 - imagePullPolicy: "IfNotPresent" - command: - - /bin/bash - - -ec - - | - read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" - kafka_exporter \ - --kafka.server=kafka-0.kafka-headless.db.svc.cluster.local:9092 \ - --kafka.server=kafka-1.kafka-headless.db.svc.cluster.local:9092 \ - --web.listen-address=:9308 - ports: - - name: metrics - containerPort: 9308 - resources: - limits: {} - requests: {} ---- -# Source: kafka/charts/zookeeper/templates/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: kafka-zookeeper - namespace: db - labels: - app.kubernetes.io/name: zookeeper - helm.sh/chart: zookeeper-5.21.9 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: zookeeper - role: zookeeper -spec: - serviceName: kafka-zookeeper-headless - replicas: 1 - podManagementPolicy: Parallel - updateStrategy: - type: RollingUpdate - selector: - matchLabels: - app.kubernetes.io/name: zookeeper - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: zookeeper - template: - metadata: - name: kafka-zookeeper - labels: - app.kubernetes.io/name: zookeeper - helm.sh/chart: zookeeper-5.21.9 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: zookeeper - spec: - - serviceAccountName: default - securityContext: - fsGroup: 1001 - containers: - - name: zookeeper - image: docker.io/bitnami/zookeeper:3.6.2-debian-10-r10 - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - command: - - bash - - -ec - - | - # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname - HOSTNAME=`hostname -s` - if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then - ORD=${BASH_REMATCH[2]} - export ZOO_SERVER_ID=$((ORD+1)) - else - echo "Failed to get index from hostname $HOST" - exit 1 - fi - exec /entrypoint.sh /run.sh - resources: - requests: - cpu: 250m - memory: 256Mi - env: - - name: ZOO_DATA_LOG_DIR - value: "" - - name: ZOO_PORT_NUMBER - value: "2181" - - name: ZOO_TICK_TIME - value: "2000" - - name: ZOO_INIT_LIMIT - value: "10" - - name: 
ZOO_SYNC_LIMIT - value: "5" - - name: ZOO_MAX_CLIENT_CNXNS - value: "60" - - name: ZOO_4LW_COMMANDS_WHITELIST - value: "srvr, mntr, ruok" - - name: ZOO_LISTEN_ALLIPS_ENABLED - value: "no" - - name: ZOO_AUTOPURGE_INTERVAL - value: "0" - - name: ZOO_AUTOPURGE_RETAIN_COUNT - value: "3" - - name: ZOO_MAX_SESSION_TIMEOUT - value: "40000" - - name: ZOO_SERVERS - value: kafka-zookeeper-0.kafka-zookeeper-headless.db.svc.cluster.local:2888:3888 - - name: ZOO_ENABLE_AUTH - value: "no" - - name: ZOO_HEAP_SIZE - value: "1024" - - name: ZOO_LOG_LEVEL - value: "ERROR" - - name: ALLOW_ANONYMOUS_LOGIN - value: "yes" - - name: POD_NAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: metadata.name - ports: - - - name: client - containerPort: 2181 - - - - name: follower - containerPort: 2888 - - name: election - containerPort: 3888 - livenessProbe: - exec: - command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] - initialDelaySeconds: 30 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - readinessProbe: - exec: - command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] - initialDelaySeconds: 5 - periodSeconds: 10 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 6 - volumeMounts: - - name: data - mountPath: /bitnami/zookeeper - volumes: - volumeClaimTemplates: - - metadata: - name: data - annotations: - spec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: "8Gi" ---- -# Source: kafka/templates/statefulset.yaml -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: kafka - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: kafka -spec: - podManagementPolicy: Parallel - replicas: 2 - selector: - matchLabels: - app.kubernetes.io/name: kafka - app.kubernetes.io/instance: kafka - app.kubernetes.io/component: kafka - serviceName: kafka-headless - updateStrategy: - type: "RollingUpdate" - template: - metadata: - labels: - app.kubernetes.io/name: kafka - helm.sh/chart: kafka-11.8.6 - app.kubernetes.io/instance: kafka - app.kubernetes.io/managed-by: Helm - app.kubernetes.io/component: kafka - spec: - securityContext: - fsGroup: 1001 - runAsUser: 1001 - serviceAccountName: kafka - containers: - - name: kafka - image: docker.io/bitnami/kafka:2.6.0-debian-10-r30 - imagePullPolicy: "IfNotPresent" - command: - - /scripts/setup.sh - env: - - name: BITNAMI_DEBUG - value: "false" - - name: MY_POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: MY_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: KAFKA_CFG_ZOOKEEPER_CONNECT - value: "kafka-zookeeper" - - name: KAFKA_INTER_BROKER_LISTENER_NAME - value: "INTERNAL" - - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP - value: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT" - - name: KAFKA_CFG_LISTENERS - value: "INTERNAL://:9093,CLIENT://:9092" - - name: KAFKA_CFG_ADVERTISED_LISTENERS - value: "INTERNAL://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9093,CLIENT://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9092" - - name: ALLOW_PLAINTEXT_LISTENER - value: "yes" - - name: KAFKA_CFG_DELETE_TOPIC_ENABLE - value: "false" - - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE - value: "true" - - name: KAFKA_HEAP_OPTS - value: "-Xmx1024m -Xms1024m" - - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES - value: "10000" - - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS - value: "1000" - - name: 
KAFKA_CFG_LOG_RETENTION_BYTES - value: "1073741824" - - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS - value: "300000" - - name: KAFKA_CFG_LOG_RETENTION_HOURS - value: "168" - - name: KAFKA_CFG_MESSAGE_MAX_BYTES - value: "1000012" - - name: KAFKA_CFG_LOG_SEGMENT_BYTES - value: "1073741824" - - name: KAFKA_CFG_LOG_DIRS - value: "/bitnami/kafka/data" - - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR - value: "1" - - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR - value: "1" - - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR - value: "1" - - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR - value: "1" - - name: KAFKA_CFG_NUM_IO_THREADS - value: "8" - - name: KAFKA_CFG_NUM_NETWORK_THREADS - value: "3" - - name: KAFKA_CFG_NUM_PARTITIONS - value: "1" - - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR - value: "1" - - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES - value: "102400" - - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES - value: "104857600" - - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES - value: "102400" - - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS - value: "6000" - ports: - - name: kafka-client - containerPort: 9092 - - name: kafka-internal - containerPort: 9093 - livenessProbe: - tcpSocket: - port: kafka-client - initialDelaySeconds: 10 - timeoutSeconds: 5 - failureThreshold: - periodSeconds: - successThreshold: - readinessProbe: - tcpSocket: - port: kafka-client - initialDelaySeconds: 5 - timeoutSeconds: 5 - failureThreshold: 6 - periodSeconds: - successThreshold: - resources: - limits: {} - requests: {} - volumeMounts: - - name: data - mountPath: /bitnami/kafka - - name: scripts - mountPath: /scripts/setup.sh - subPath: setup.sh - volumes: - - name: scripts - configMap: - name: kafka-scripts - defaultMode: 0755 - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: "8Gi" diff --git a/scripts/helmcharts/databases/charts/kafka/requirements.lock b/scripts/helmcharts/databases/charts/kafka/requirements.lock deleted file mode 100755 index 115d0b229..000000000 --- a/scripts/helmcharts/databases/charts/kafka/requirements.lock +++ /dev/null @@ -1,6 +0,0 @@ -dependencies: -- name: zookeeper - repository: https://charts.bitnami.com/bitnami - version: 5.21.9 -digest: sha256:2f3c43ce02e3966648b8c89be121fe39537f62ea1d161ad908f51ddc90e4243e -generated: "2020-09-29T07:43:56.483358254Z" diff --git a/scripts/helmcharts/databases/charts/kafka/requirements.yaml b/scripts/helmcharts/databases/charts/kafka/requirements.yaml deleted file mode 100755 index 533875258..000000000 --- a/scripts/helmcharts/databases/charts/kafka/requirements.yaml +++ /dev/null @@ -1,5 +0,0 @@ -dependencies: - - name: zookeeper - version: 5.x.x - repository: https://charts.bitnami.com/bitnami - condition: zookeeper.enabled diff --git a/scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt b/scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt old mode 100755 new mode 100644 index 0347c21c4..f34c2563f --- a/scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt +++ b/scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt @@ -1,43 +1,40 @@ -{{- $replicaCount := int .Values.replicaCount -}} -{{- $releaseNamespace := .Release.Namespace -}} -{{- $clusterDomain := .Values.clusterDomain -}} -{{- $fullname := include "kafka.fullname" . 
-}}
-{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}}
-{{- $servicePort := int .Values.service.port -}}
-{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}}
-{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }}
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}

-###############################################################################
-### ERROR: You enabled external access to Kafka brokers without specifying ###
-### the array of load balancer IPs for Kafka brokers. ###
-###############################################################################
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:

-This deployment will be incomplete until you configure the array of load balancer
-IPs for Kafka brokers. To complete your deployment follow the steps below:
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}

-1. Wait for the load balancer IPs (it may take a few minutes for them to be available):
+Get the list of pods by executing:

-    kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w
+  kubectl get pods --namespace {{ .Release.Namespace }} -l app.kubernetes.io/instance={{ .Release.Name }}

-2. Obtain the load balancer IPs and upgrade your chart:
+Access the pod you want to debug by executing

-    {{- range $i, $e := until $replicaCount }}
-    LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
-    {{- end }}
+  kubectl exec --namespace {{ .Release.Namespace }} -ti <NAME OF THE POD> -- bash

-3. Upgrade your chart:
+In order to replicate the container startup scripts execute this command:

-    helm upgrade {{ .Release.Name }} bitnami/{{ .Chart.Name }} \
-      --set replicaCount={{ $replicaCount }} \
-      --set externalAccess.enabled=true \
-      {{- range $i, $e := until $replicaCount }}
-      --set externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \
-      {{- end }}
-      --set externalAccess.service.type=LoadBalancer
+  /opt/bitnami/scripts/kafka/entrypoint.sh /opt/bitnami/scripts/kafka/run.sh

{{- else }}

-{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq .Values.auth.clientProtocol "plaintext") }}
+{{- $replicaCount := int .Values.replicaCount -}}
+{{- $releaseNamespace := .Release.Namespace -}}
+{{- $clusterDomain := .Values.clusterDomain -}}
+{{- $fullname := include "common.names.fullname" . -}}
+{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}}
+{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . 
)) -}} +{{- $saslMechanisms := .Values.auth.sasl.mechanisms -}} +{{- $tlsEndpointIdentificationAlgorithm := default "" .Values.auth.tls.endpointIdentificationAlgorithm -}} +{{- $tlsPasswordSecret := printf "$(kubectl get secret %s --namespace %s -o jsonpath='{.data.password}' | base64 -d | cut -d , -f 1)" .Values.auth.tls.existingSecret $releaseNamespace -}} +{{- $tlsPassword := ternary .Values.auth.tls.password $tlsPasswordSecret (eq .Values.auth.tls.existingSecret "") -}} +{{- $servicePort := int .Values.service.ports.client -}} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq $externalClientProtocol "PLAINTEXT") }} --------------------------------------------------------------------------------------------- WARNING @@ -64,57 +61,100 @@ Each Kafka broker can be accessed by producers via port {{ $servicePort }} on th {{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }} {{- end }} {{ join "\n" $brokerList | nindent 4 }} - - {{- if (include "kafka.client.saslAuthentication" .) }} -You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files by executing these commands: +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files with the content below: - kafka_jaas.conf: -cat > kafka_jaas.conf < client.properties <<>(Value) name: kafka_controller_$1_$2_$4 @@ -56,4 +58,7 @@ data: labels: topic: $3 partition: $4 + {{- if .Values.metrics.jmx.extraRules }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.extraRules "context" $ ) | nindent 6 }} + {{- end }} {{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml b/scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml old mode 100755 new mode 100644 index 83edd8422..35c79f41f --- a/scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml @@ -2,44 +2,33 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }}-jmx-metrics - labels: {{- include "kafka.labels" . | nindent 4 }} - app.kubernetes.io/component: kafka + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: metrics {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} annotations: {{- if .Values.metrics.jmx.service.annotations }} - {{ include "kafka.tplValue" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} {{- end }} spec: - type: {{ .Values.metrics.jmx.service.type }} - {{- if eq .Values.metrics.jmx.service.type "LoadBalancer" }} - {{- if .Values.metrics.jmx.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.metrics.jmx.service.loadBalancerIP }} - {{- end }} - {{- if .Values.metrics.jmx.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- toYaml .Values.metrics.jmx.service.loadBalancerSourceRanges | nindent 4 }} - {{- end }} - {{- end }} - {{- if and (eq .Values.metrics.jmx.service.type "ClusterIP") .Values.metrics.jmx.service.clusterIP }} + type: ClusterIP + sessionAffinity: {{ .Values.metrics.jmx.service.sessionAffinity }} + {{- if .Values.metrics.jmx.service.clusterIP }} clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} {{- end }} ports: - name: http-metrics - port: {{ .Values.metrics.jmx.service.port }} + port: {{ .Values.metrics.jmx.service.ports.metrics }} protocol: TCP targetPort: metrics - {{- if and (or (eq .Values.metrics.jmx.service.type "NodePort") (eq .Values.metrics.jmx.service.type "LoadBalancer")) (not (empty .Values.metrics.jmx.service.nodePort)) }} - nodePort: {{ .Values.metrics.jmx.service.nodePort }} - {{- else if eq .Values.metrics.jmx.service.type "ClusterIP" }} - nodePort: null - {{- end }} - selector: {{- include "kafka.matchLabels" . | nindent 4 }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} app.kubernetes.io/component: kafka {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml old mode 100755 new mode 100644 index c547fbb39..bf731f20b --- a/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -2,86 +2,170 @@ {{- $replicaCount := int .Values.replicaCount -}} {{- $releaseNamespace := .Release.Namespace -}} {{- $clusterDomain := .Values.clusterDomain -}} -{{- $fullname := include "kafka.fullname" . -}} -{{- $servicePort := int .Values.service.port -}} -apiVersion: apps/v1 +{{- $fullname := include "common.names.fullname" . -}} +{{- $servicePort := int .Values.service.ports.client -}} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} kind: Deployment metadata: - name: {{ template "kafka.fullname" . }}-exporter - labels: {{- include "kafka.labels" . | nindent 4 }} - app.kubernetes.io/component: metrics + name: {{ include "kafka.metrics.kafka.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: replicas: 1 selector: - matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: metrics + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: cluster-metrics template: metadata: - labels: {{- include "kafka.labels" . | nindent 8 }} - app.kubernetes.io/component: metrics + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.metrics.kafka.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.metrics.kafka.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.podAnnotations "context" $) | nindent 8 }} + {{- end }} spec: -{{- include "kafka.imagePullSecrets" . | indent 6 }} + {{- include "kafka.imagePullSecrets" . | nindent 6 }} + {{- if .Values.metrics.kafka.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.metrics.kafka.podAntiAffinityPreset "component" "metrics" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.metrics.kafka.nodeAffinityPreset.type "key" .Values.metrics.kafka.nodeAffinityPreset.key "values" .Values.metrics.kafka.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.metrics.kafka.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.metrics.kafka.priorityClassName }} + priorityClassName: {{ .Values.metrics.kafka.priorityClassName }} + {{- end }} + {{- if .Values.metrics.kafka.schedulerName }} + schedulerName: {{ .Values.metrics.kafka.schedulerName }} + {{- end }} + {{- if .Values.metrics.kafka.podSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + {{- if .Values.metrics.kafka.initContainers }} + initContainers: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.initContainers "context" $) | nindent 8 }} + {{- end }} containers: - name: kafka-exporter - image: {{ include "kafka.metrics.kafka.image" . }} + image: {{ include "kafka.metrics.kafka.image" . }} imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + {{- if .Values.metrics.kafka.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.kafka.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.command "context" $) | nindent 12 }} + {{- else }} command: - - /bin/bash - - -ec + - bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.metrics.kafka.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ce - | - read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" kafka_exporter \ {{- range $i, $e := until $replicaCount }} --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ {{- end }} {{- if (include "kafka.client.saslAuthentication" .) }} --sasl.enabled \ - --sasl.username="$SASL_USERNAME" \ - --sasl.password="${sasl_passwords[0]}" \ + --sasl.username=$SASL_USERNAME \ + --sasl.password=$SASL_USER_PASSWORD \ + --sasl.mechanism={{ include "kafka.metrics.kafka.saslMechanism" . }} \ {{- end }} - {{- if (include "kafka.tlsEncryption" .) }} + {{- if (include "kafka.client.tlsEncryption" .) 
}} --tls.enabled \ {{- if .Values.metrics.kafka.certificatesSecret }} - --tls.ca-file="/opt/bitnami/kafka-exporter/certs/ca-file" \ - --tls.cert-file="/opt/bitnami/kafka-exporter/certs/cert-file" \ - --tls.key-file="/opt/bitnami/kafka-exporter/certs/key-file" \ + --tls.key-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsKey }} \ + --tls.cert-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCert }} \ + {{- if .Values.metrics.kafka.tlsCaSecret }} + --tls.ca-file=/opt/bitnami/kafka-exporter/cacert/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- else }} + --tls.ca-file=/opt/bitnami/kafka-exporter/certs/{{ .Values.metrics.kafka.tlsCaCert }} \ + {{- end }} {{- end }} {{- end }} {{- range $key, $value := .Values.metrics.kafka.extraFlags }} --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ {{- end }} - --web.listen-address=:9308 + --web.listen-address=:{{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} env: - name: SASL_USERNAME - value: {{ index .Values.auth.jaas.clientUsers 0 | quote }} + value: {{ index $clientUsers 0 | quote }} - name: SASL_USER_PASSWORD valueFrom: secretKeyRef: name: {{ include "kafka.jaasSecretName" . }} - key: client-passwords + key: system-user-password {{- end }} ports: - name: metrics - containerPort: 9308 + containerPort: {{ .Values.metrics.kafka.containerPorts.metrics }} {{- if .Values.metrics.kafka.resources }} resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} {{- end }} - {{- if and (include "kafka.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} volumeMounts: + {{- if .Values.metrics.kafka.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} - name: kafka-exporter-certificates mountPath: /opt/bitnami/kafka-exporter/certs/ readOnly: true + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + mountPath: /opt/bitnami/kafka-exporter/cacert/ + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.sidecars "context" $) | nindent 8 }} + {{- end }} volumes: + {{- if .Values.metrics.kafka.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.kafka.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and (include "kafka.client.tlsEncryption" .) 
.Values.metrics.kafka.certificatesSecret }} - name: kafka-exporter-certificates secret: secretName: {{ .Values.metrics.kafka.certificatesSecret }} defaultMode: 0440 - {{- end }} + {{- if .Values.metrics.kafka.tlsCaSecret }} + - name: kafka-exporter-ca-certificate + secret: + secretName: {{ .Values.metrics.kafka.tlsCaSecret }} + defaultMode: 0440 + {{- end }} + {{- end }} {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-serviceaccount.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-serviceaccount.yaml new file mode 100644 index 000000000..f8e3eb305 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.kafka.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.metrics.kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.metrics.kafka.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml old mode 100755 new mode 100644 index 54a4ccb0b..9daae4a1a --- a/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml @@ -2,44 +2,33 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }}-metrics - labels: {{- include "kafka.labels" . | nindent 4 }} - app.kubernetes.io/component: metrics + name: {{ printf "%s-metrics" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/component: cluster-metrics {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} annotations: {{- if .Values.metrics.kafka.service.annotations }} - {{ include "kafka.tplValue" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} + {{ include "common.tplvalues.render" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} {{- end }} spec: - type: {{ .Values.metrics.kafka.service.type }} - {{- if eq .Values.metrics.kafka.service.type "LoadBalancer" }} - {{- if .Values.metrics.kafka.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.metrics.kafka.service.loadBalancerIP }} - {{- end }} - {{- if .Values.metrics.kafka.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- toYaml .Values.metrics.kafka.service.loadBalancerSourceRanges | nindent 4 }} - {{- end }} - {{- end }} - {{- if and (eq .Values.metrics.kafka.service.type "ClusterIP") .Values.metrics.kafka.service.clusterIP }} + type: ClusterIP + sessionAffinity: {{ .Values.metrics.kafka.service.sessionAffinity }} + {{- if .Values.metrics.kafka.service.clusterIP }} clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} {{- end }} ports: - name: http-metrics - port: {{ .Values.metrics.kafka.service.port }} + port: {{ .Values.metrics.kafka.service.ports.metrics }} protocol: TCP targetPort: metrics - {{- if and (or (eq .Values.metrics.kafka.service.type "NodePort") (eq .Values.metrics.kafka.service.type "LoadBalancer")) (not (empty .Values.metrics.kafka.service.nodePort)) }} - nodePort: {{ .Values.metrics.kafka.service.nodePort }} - {{- else if eq .Values.metrics.kafka.service.type "ClusterIP" }} - nodePort: null - {{- end }} - selector: {{- include "kafka.matchLabels" . | nindent 4 }} - app.kubernetes.io/component: metrics + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-secret.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-secret.yaml new file mode 100644 index 000000000..0c0fb1bc1 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-secret.yaml @@ -0,0 +1,19 @@ +{{- if and .Values.provisioning.enabled (include "kafka.client.tlsEncryption" .) (not .Values.provisioning.auth.tls.passwordsSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "kafka.client.passwordsSecretName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + truststore-password: {{ default "" .Values.provisioning.auth.tls.truststorePassword | b64enc | quote }} + keystore-password: {{ default "" .Values.provisioning.auth.tls.keystorePassword | b64enc | quote }} + key-password: {{ default "" .Values.provisioning.auth.tls.keyPassword | b64enc | quote }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-serviceaccount.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-serviceaccount.yaml new file mode 100644 index 000000000..47614674c --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.provisioning.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.provisioning.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.provisioning.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning.yaml new file mode 100644 index 000000000..765e88315 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-provisioning.yaml @@ -0,0 +1,260 @@ +{{- if .Values.provisioning.enabled }} +{{- $replicaCount := int .Values.replicaCount }} +kind: Job +apiVersion: batch/v1 +metadata: + name: {{ printf "%s-provisioning" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + helm.sh/hook: post-install,post-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: kafka-provisioning + {{- if .Values.provisioning.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podLabels "context" $) | nindent 8 }} + {{- end }} + annotations: + {{- if .Values.provisioning.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "kafka.provisioning.serviceAccountName" . }} + {{- include "kafka.imagePullSecrets" .
| nindent 6 }} + {{- if .Values.provisioning.schedulerName }} + schedulerName: {{ .Values.provisioning.schedulerName | quote }} + {{- end }} + {{- if .Values.provisioning.podSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + {{- if .Values.provisioning.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.provisioning.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if or .Values.provisioning.initContainers .Values.provisioning.waitForKafka }} + initContainers: + {{- if .Values.provisioning.waitForKafka }} + - name: wait-for-available-kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + wait-for-port \ + --host={{ include "common.names.fullname" . }} \ + --state=inuse \ + --timeout=120 \ + {{ .Values.service.ports.client | int64 }}; + echo "Kafka is available"; + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.initContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + containers: + - name: kafka-provisioning + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.provisioning.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.provisioning.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.provisioning.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.command "context" $) | nindent 12 }} + {{- else }} + command: + - /bin/bash + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.provisioning.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.args "context" $) | nindent 12 }} + {{- else }} + args: + - -ec + - | + echo "Configuring environment" + . /opt/bitnami/scripts/libkafka.sh + export CLIENT_CONF="${CLIENT_CONF:-/opt/bitnami/kafka/config/client.properties}" + if [ ! -f "$CLIENT_CONF" ]; then + touch $CLIENT_CONF + + kafka_common_conf_set "$CLIENT_CONF" security.protocol {{ include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.type {{ upper .Values.provisioning.auth.tls.type | quote }} + ! 
is_empty_value "$KAFKA_CLIENT_KEY_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.key.password "$KAFKA_CLIENT_KEY_PASSWORD" + {{- if eq (upper .Values.provisioning.auth.tls.type) "PEM" }} + file_to_multiline_property() { + awk 'NR > 1{print line" \\"}{line=$0;}END{print $0" "}' <"${1:?missing file}" + } + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.key "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.key }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.certificate.chain "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.cert }}")" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.certificates "$(file_to_multiline_property "/certs/{{ .Values.provisioning.auth.tls.caCert }}")" + {{- else if eq (upper .Values.provisioning.auth.tls.type) "JKS" }} + kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.location "/certs/{{ .Values.provisioning.auth.tls.keystore }}" + kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.location "/certs/{{ .Values.provisioning.auth.tls.truststore }}" + ! is_empty_value "$KAFKA_CLIENT_KEYSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.keystore.password "$KAFKA_CLIENT_KEYSTORE_PASSWORD" + ! is_empty_value "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" && kafka_common_conf_set "$CLIENT_CONF" ssl.truststore.password "$KAFKA_CLIENT_TRUSTSTORE_PASSWORD" + {{- end }} + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- if contains "plain" .Values.auth.sasl.mechanisms }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism PLAIN + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.plain.PlainLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if contains "scram-sha-256" .Values.auth.sasl.mechanisms }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-256 + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- else if contains "scram-sha-512" .Values.auth.sasl.mechanisms }} + kafka_common_conf_set "$CLIENT_CONF" sasl.mechanism SCRAM-SHA-512 + kafka_common_conf_set "$CLIENT_CONF" sasl.jaas.config "org.apache.kafka.common.security.scram.ScramLoginModule required username=\"$SASL_USERNAME\" password=\"$SASL_USER_PASSWORD\";" + {{- end }} + {{- end }} + fi + + echo "Running pre-provisioning script if any given" + {{ .Values.provisioning.preScript | nindent 14 }} + + kafka_provisioning_commands=( + {{- range $topic := .Values.provisioning.topics }} + "/opt/bitnami/kafka/bin/kafka-topics.sh \ + --create \ + --if-not-exists \ + --bootstrap-server ${KAFKA_SERVICE} \ + --replication-factor {{ $topic.replicationFactor | default $.Values.provisioning.replicationFactor }} \ + --partitions {{ $topic.partitions | default $.Values.provisioning.numPartitions }} \ + {{- range $name, $value := $topic.config }} + --config {{ $name }}={{ $value }} \ + {{- end }} + --command-config ${CLIENT_CONF} \ + --topic {{ $topic.name }}" + {{- end }} + {{- range $command := .Values.provisioning.extraProvisioningCommands }} + {{- $command | quote | nindent 16 }} + {{- end }} + ) + + echo "Starting provisioning" + for ((index=0; index < ${#kafka_provisioning_commands[@]}; index+={{ .Values.provisioning.parallel }})) + do + for j in $(seq ${index} $((${index}+{{ .Values.provisioning.parallel }}-1))) + do + ${kafka_provisioning_commands[j]} & # Async command + done + wait # Wait for the
jobs + done + + echo "Running post-provisioning script if any given" + {{ .Values.provisioning.postScript | nindent 14 }} + + echo "Provisioning succeeded" + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + {{- if (include "kafka.client.tlsEncryption" .) }} + - name: KAFKA_CLIENT_KEY_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keyPasswordSecretKey }} + - name: KAFKA_CLIENT_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.keystorePasswordSecretKey }} + - name: KAFKA_CLIENT_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "kafka.client.passwordsSecretName" . }} + key: {{ .Values.provisioning.auth.tls.truststorePasswordSecretKey }} + {{- end }} + - name: KAFKA_SERVICE + value: {{ printf "%s:%d" (include "common.names.fullname" .) (.Values.service.ports.client | int64) }} + {{- if (include "kafka.client.saslAuthentication" .) }} + {{- $clientUsers := .Values.auth.sasl.jaas.clientUsers }} + - name: SASL_USERNAME + value: {{ index $clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: system-user-password + {{- end }} + {{- if .Values.provisioning.extraEnvVars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.provisioning.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.provisioning.extraEnvVarsCM .Values.provisioning.extraEnvVarsSecret }} + envFrom: + {{- if .Values.provisioning.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.provisioning.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.provisioning.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} + {{- if .Values.provisioning.resources }} + resources: {{- toYaml .Values.provisioning.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties + subPath: log4j.properties + {{- end }} + {{- if (include "kafka.client.tlsEncryption" .) }} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + mountPath: /certs + readOnly: true + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.provisioning.sidecars }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + {{- if (include "kafka.client.tlsEncryption" .) 
}} + {{- if not (empty .Values.provisioning.auth.tls.certificatesSecret) }} + - name: kafka-client-certs + secret: + secretName: {{ .Values.provisioning.auth.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if .Values.provisioning.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.provisioning.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml b/scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml old mode 100755 new mode 100644 index 0a34d50dd..8f7bc6c14 --- a/scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml @@ -3,14 +3,15 @@ apiVersion: v1 kind: ConfigMap metadata: name: {{ include "kafka.log4j.configMapName" . }} - labels: {{- include "kafka.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} data: log4j.properties: |- - {{ .Values.log4j | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.log4j "context" $ ) | nindent 4 }} {{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-egress.yaml b/scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-egress.yaml new file mode 100644 index 000000000..068024a0e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-egress.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.networkPolicy.enabled .Values.networkPolicy.egressRules.customRules }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ printf "%s-egress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Egress + egress: + {{- include "common.tplvalues.render" (dict "value" .Values.networkPolicy.egressRules.customRules "context" $) | nindent 4 }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-ingress.yaml b/scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-ingress.yaml new file mode 100644 index 000000000..258dcabb6 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/networkpolicy-ingress.yaml @@ -0,0 +1,53 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . 
}} +metadata: + name: {{ printf "%s-ingress" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + policyTypes: + - Ingress + ingress: + # Allow client connections + - ports: + - port: {{ .Values.containerPorts.client }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: {{- toYaml .Values.networkPolicy.explicitNamespacesSelector | nindent 12 }} + {{- end }} + {{- end }} + # Allow inter-broker communication + - ports: + - port: {{ .Values.containerPorts.internal }} + from: + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + # Allow external connections + {{- if .Values.externalAccess.enabled }} + - ports: + - port: {{ .Values.containerPorts.external }} + {{- if .Values.networkPolicy.externalAccess.from }} + from: {{- include "common.tplvalues.render" ( dict "value" .Values.networkPolicy.externalAccess.from "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.kafka.enabled }} + # Allow Prometheus scrapes + - ports: + - port: {{ .Values.metrics.kafka.containerPorts.metrics }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml b/scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml old mode 100755 new mode 100644 index 6df5c733c..e0a60151d --- a/scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml @@ -1,16 +1,17 @@ {{- $replicaCount := int .Values.replicaCount }} {{- if and .Values.pdb.create (gt $replicaCount 1) }} -apiVersion: {{ ternary "policy/v1" "policy/v1beta1" (semverCompare ">=1.21.0-0" .Capabilities.KubeVersion.Version) }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} kind: PodDisruptionBudget metadata: - name: {{ template "kafka.fullname" . }} - labels: {{- include "kafka.labels" . | nindent 4 }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: {{- if .Values.pdb.minAvailable }} @@ -20,6 +21,6 @@ spec: maxUnavailable: {{ .Values.pdb.maxUnavailable }} {{- end }} selector: - matchLabels: {{- include "kafka.matchLabels" .
| nindent 6 }} + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} app.kubernetes.io/component: kafka {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/prometheusrule.yaml b/scripts/helmcharts/databases/charts/kafka/templates/prometheusrule.yaml new file mode 100644 index 000000000..bce728a37 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/prometheusrule.yaml @@ -0,0 +1,20 @@ +{{- if and (or .Values.metrics.kafka.enabled .Values.metrics.jmx.enabled) .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.groups }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.prometheusRule.namespace }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.metrics.prometheusRule.labels }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.labels "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" .) | nindent 4 }} + {{- end }} +spec: + groups: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.prometheusRule.groups "context" .) | nindent 4 }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/role.yaml b/scripts/helmcharts/databases/charts/kafka/templates/role.yaml old mode 100755 new mode 100644 index 943c5bf3c..63215b3b8 --- a/scripts/helmcharts/databases/charts/kafka/templates/role.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/role.yaml @@ -1,15 +1,16 @@ {{- if .Values.rbac.create -}} -apiVersion: rbac.authorization.k8s.io/v1 +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} kind: Role metadata: - name: {{ template "kafka.fullname" . }} - labels: {{- include "kafka.labels" . | nindent 4 }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} rules: - apiGroups: diff --git a/scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml b/scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml old mode 100755 new mode 100644 index 78f940f85..fb5e3a157 --- a/scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml @@ -1,19 +1,20 @@ -{{- if and .Values.serviceAccount.create .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 +{{- if .Values.rbac.create }} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . 
}} kind: RoleBinding metadata: - name: {{ template "kafka.fullname" . }} - labels: {{- include "kafka.labels" . | nindent 4 }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} roleRef: kind: Role - name: {{ template "kafka.fullname" . }} + name: {{ include "common.names.fullname" . }} apiGroup: rbac.authorization.k8s.io subjects: - kind: ServiceAccount diff --git a/scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml b/scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml old mode 100755 new mode 100644 index 705545a61..57e125053 --- a/scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml @@ -1,20 +1,24 @@ apiVersion: v1 kind: ConfigMap metadata: - name: {{ template "kafka.fullname" . }}-scripts - labels: {{- include "kafka.labels" . | nindent 4 }} + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} data: - {{- $fullname := include "kafka.fullname" . }} + {{- $fullname := include "common.names.fullname" . 
}} {{- $releaseNamespace := .Release.Namespace }} {{- $clusterDomain := .Values.clusterDomain }} - {{- $interBrokerPort := .Values.service.internalPort }} - {{- $clientPort := .Values.service.port }} + {{- $interBrokerPort := .Values.service.ports.internal }} + {{- $clientPort := .Values.service.ports.client }} + {{- $jksTruststoreSecret := .Values.auth.tls.jksTruststoreSecret -}} + {{- $jksTruststore := .Values.auth.tls.jksTruststore -}} + {{- $jksKeystoreSAN := .Values.auth.tls.jksKeystoreSAN -}} {{- if .Values.externalAccess.autoDiscovery.enabled }} auto-discovery.sh: |- #!/bin/bash @@ -22,7 +26,7 @@ data: SVC_NAME="${MY_POD_NAME}-external" {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} - # Auxiliar functions + # Auxiliary functions retry_while() { local -r cmd="${1:?cmd is missing}" local -r retries="${2:-12}" @@ -72,47 +76,127 @@ data: #!/bin/bash ID="${MY_POD_NAME#"{{ $fullname }}-"}" - export KAFKA_CFG_BROKER_ID="$ID" + if [[ -f "{{ .Values.logsDirs | splitList "," | first }}/meta.properties" ]]; then + export KAFKA_CFG_BROKER_ID="$(grep "broker.id" "{{ .Values.logsDirs | splitList "," | first }}/meta.properties" | awk -F '=' '{print $2}')" + else + export KAFKA_CFG_BROKER_ID="$((ID + {{ .Values.minBrokerId }}))" + fi + + {{- if eq .Values.brokerRackAssignment "aws-az" }} + export KAFKA_CFG_BROKER_RACK=$(curl "http://169.254.169.254/latest/meta-data/placement/availability-zone-id") + {{- end }} {{- if .Values.externalAccess.enabled }} # Configure external ip and port {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} {{- if .Values.externalAccess.autoDiscovery.enabled }} - export EXTERNAL_ACCESS_IP="$(<${SHARED_FILE})" + export EXTERNAL_ACCESS_HOST="$(<${SHARED_FILE})" {{- else }} - export EXTERNAL_ACCESS_IP=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + export EXTERNAL_ACCESS_HOST=$(echo '{{ .Values.externalAccess.service.loadBalancerNames | default .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") {{- end }} - export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.port }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.ports.external }} {{- else if eq .Values.externalAccess.service.type "NodePort" }} - {{- if .Values.externalAccess.service.domain }} - export EXTERNAL_ACCESS_IP={{ .Values.externalAccess.service.domain }} + {{- if and .Values.externalAccess.service.usePodIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${MY_POD_IP}" + {{- else if or .Values.externalAccess.service.useHostIPs .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_HOST="${HOST_IP}" + {{- else if .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} {{- else }} - export EXTERNAL_ACCESS_IP=$(curl -s https://ipinfo.io/ip) + export EXTERNAL_ACCESS_HOST=$(curl -s https://ipinfo.io/ip) {{- end }} {{- if .Values.externalAccess.autoDiscovery.enabled }} export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" {{- else }} export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") {{- end }} + {{- else }} + export EXTERNAL_ACCESS_HOST={{ .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_PORT="$((ID + {{ .Values.externalAccess.service.ports.external }}))" {{- end }} # Configure Kafka advertised listeners {{- if .Values.advertisedListeners }} - export 
KAFKA_CFG_ADVERTISED_LISTENERS={{ .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ join "," .Values.advertisedListeners }} {{- else }} - export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_IP}:${EXTERNAL_ACCESS_PORT}" + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_HOST}:${EXTERNAL_ACCESS_PORT}" {{- end }} {{- end }} {{- if (include "kafka.tlsEncryption" .) }} - if [[ -f "/certs/kafka.truststore.jks" ]] && [[ -f "/certs/kafka-${ID}.keystore.jks" ]]; then - mkdir -p /opt/bitnami/kafka/config/certs - cp "/certs/kafka.truststore.jks" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" - cp "/certs/kafka-${ID}.keystore.jks" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" + mkdir -p /opt/bitnami/kafka/config/certs + {{- if eq .Values.auth.tls.type "jks" }} + {{- if not (empty .Values.auth.tls.existingSecrets) }} + JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs-${ID}" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }} + JKS_KEYSTORE={{ printf "/certs-${ID}/%s" (default "kafka.keystore.jks" $jksKeystoreSAN) | quote }} + {{- else }} + JKS_TRUSTSTORE={{ printf "/%s/%s" (ternary "certs" "truststore" (empty $jksTruststoreSecret)) (default "kafka.truststore.jks" $jksTruststore) | quote }} + JKS_KEYSTORE={{ printf "/certs/%s" (default "kafka-${ID}.keystore.jks" $jksKeystoreSAN) | quote }} + {{- end }} + if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then + cp "$JKS_TRUSTSTORE" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + cp "$JKS_KEYSTORE" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" else echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled." 
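+      # A minimal sketch of how the expected stores are usually provided (the
+      # secret and file names below are assumptions, not part of the chart's
+      # contract): with auth.tls.existingSecrets, a per-broker secret mounted
+      # at /certs-${ID} should contain the truststore and keystore checked
+      # above, e.g. created along these lines:
+      #   kubectl create secret generic kafka-0-tls \
+      #     --from-file=kafka.truststore.jks=./kafka.truststore.jks \
+      #     --from-file=kafka.keystore.jks=./kafka.keystore.jks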
exit 1 fi + export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + + {{- else if eq .Values.auth.tls.type "pem" }} + + {{- if or (not (empty .Values.auth.tls.existingSecrets)) .Values.auth.tls.autoGenerated }} + PEM_CA="/certs-${ID}/ca.crt" + PEM_CERT="/certs-${ID}/tls.crt" + PEM_KEY="/certs-${ID}/tls.key" + {{- else }} + PEM_CA="/certs/kafka.truststore.pem" + PEM_CERT="/certs/kafka-${ID}.keystore.pem" + PEM_KEY="/certs/kafka-${ID}.keystore.key" + {{- end }} + if [[ -f "$PEM_CERT" ]] && [[ -f "$PEM_KEY" ]]; then + CERT_DIR="/opt/bitnami/kafka/config/certs" + PEM_CA_LOCATION="${CERT_DIR}/kafka.truststore.pem" + PEM_CERT_LOCATION="${CERT_DIR}/kafka.keystore.pem" + {{- if .Values.auth.tls.pemChainIncluded }} + cat $PEM_CERT | csplit - -s -z '/\-*END CERTIFICATE\-*/+1' '{*}' -f ${CERT_DIR}/xx + FIND_CA_RESULT=$(find ${CERT_DIR} -not -name 'xx00' -name 'xx*') + if [[ $(echo $FIND_CA_RESULT | wc -l) < 1 ]]; then + echo "auth.tls.pemChainIncluded was set, but PEM chain only contained 1 cert" + exit 1 + fi + echo $FIND_CA_RESULT | sort | xargs cat >> "$PEM_CA_LOCATION" + cat ${CERT_DIR}/xx00 > "$PEM_CERT_LOCATION" + {{- else }} + if [[ -f "$PEM_CA" ]]; then + cp "$PEM_CA" "$PEM_CA_LOCATION" + cp "$PEM_CERT" "$PEM_CERT_LOCATION" + else + echo "PEM_CA not provided, and auth.tls.pemChainIncluded was not true. One of these values must be set when using PEM type for TLS." + exit 1 + fi + {{- end }} + + # Ensure the key used PEM format with PKCS#8 + openssl pkcs8 -topk8 -nocrypt -in "$PEM_KEY" > "/opt/bitnami/kafka/config/certs/kafka.keystore.key" + else + echo "Couldn't find the expected PEM files! They are mandatory when encryption via TLS is enabled." + exit 1 + fi + export KAFKA_TLS_TRUSTSTORE_FILE="/opt/bitnami/kafka/config/certs/kafka.truststore.pem" + {{- end }} + {{- end }} + + # Configure zookeeper client + {{- if and (not (empty .Values.auth.zookeeper.tls.existingSecret)) .Values.auth.zookeeper.tls.enabled }} + JKS_TRUSTSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretTruststoreKey) | quote }} + JKS_KEYSTORE={{ printf "/kafka-zookeeper-cert/%s" (.Values.auth.zookeeper.tls.existingSecretKeystoreKey) | quote }} + if [[ -f "$JKS_TRUSTSTORE" ]] && [[ -f "$JKS_KEYSTORE" ]]; then + CERT_DIR="/opt/bitnami/kafka/config/certs" + TRUSTSTORE_LOCATION="${CERT_DIR}/zookeeper.truststore.jks" + cp "$JKS_TRUSTSTORE" "$TRUSTSTORE_LOCATION" + cp "$JKS_KEYSTORE" "${CERT_DIR}/zookeeper.keystore.jks" + export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE="${TRUSTSTORE_LOCATION}" + fi {{- end }} exec /entrypoint.sh /run.sh diff --git a/scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml b/scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml old mode 100755 new mode 100644 index 790790b3f..73091f5d7 --- a/scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml @@ -3,12 +3,18 @@ apiVersion: v1 kind: ServiceAccount metadata: name: {{ template "kafka.serviceAccountName" . }} - labels: {{- include "kafka.labels" . | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml old mode 100755 new mode 100644 index 250bb5306..1919feebb --- a/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -2,25 +2,33 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "kafka.fullname" . }}-jmx-metrics + name: {{ printf "%s-jmx-metrics" (include "common.names.fullname" .) }} {{- if .Values.metrics.serviceMonitor.namespace }} namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} {{- end }} - labels: {{- include "kafka.labels" . | nindent 4 }} - app.kubernetes.io/component: kafka - {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} - {{ $key }}: {{ $value | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} selector: - matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: kafka + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: metrics endpoints: - port: http-metrics path: "/" @@ -30,6 +38,15 @@ spec: {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} namespaceSelector: matchNames: - {{ .Release.Namespace }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml old mode 100755 new mode 100644 index 951bf7c41..343194667 --- a/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml @@ -2,25 +2,33 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: - name: {{ template "kafka.fullname" . }}-metrics + name: {{ printf "%s-metrics" (include "common.names.fullname" .) }} {{- if .Values.metrics.serviceMonitor.namespace }} namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} {{- end }} - labels: {{- include "kafka.labels" . | nindent 4 }} - app.kubernetes.io/component: metrics - {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} - {{ $key }}: {{ $value | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/component: cluster-metrics + {{- if .Values.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: + {{- if .Values.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel }} + {{- end }} selector: - matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} - app.kubernetes.io/component: metrics + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/component: cluster-metrics endpoints: - port: http-metrics path: "/metrics" @@ -30,6 +38,15 @@ spec: {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} namespaceSelector: matchNames: - {{ .Release.Namespace }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml b/scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml old mode 100755 new mode 100644 index e9b5ce8f9..9532a5734 --- a/scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml @@ -1,54 +1,47 @@ {{- $replicaCount := int .Values.replicaCount }} -{{- $fullname := include "kafka.fullname" . }} +{{- $fullname := include "common.names.fullname" . }} {{- $releaseNamespace := .Release.Namespace }} {{- $clusterDomain := .Values.clusterDomain }} -{{- $interBrokerPort := .Values.service.internalPort }} -{{- $clientPort := .Values.service.port }} -{{- $interBrokerProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.interBrokerProtocol ) -}} -{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} -{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} -{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} -apiVersion: apps/v1 +{{- $interBrokerProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.interBrokerProtocol) -}} +{{- $clientProtocol := include "kafka.listenerType" (dict "protocol" .Values.auth.clientProtocol) -}} +{{- $externalClientProtocol := include "kafka.listenerType" (dict "protocol" (include "kafka.externalClientProtocol" . )) -}} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" . }} kind: StatefulSet metadata: - name: {{ include "kafka.fullname" . }} - labels: {{- include "kafka.labels" . | nindent 4 }} + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: - podManagementPolicy: Parallel + podManagementPolicy: {{ .Values.podManagementPolicy }} replicas: {{ .Values.replicaCount }} selector: - matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} app.kubernetes.io/component: kafka - serviceName: {{ template "kafka.fullname" . }}-headless - updateStrategy: - type: {{ .Values.updateStrategy | quote }} - {{- if (eq "OnDelete" .Values.updateStrategy) }} - rollingUpdate: null - {{- else if .Values.rollingUpdatePartition }} - rollingUpdate: - partition: {{ .Values.rollingUpdatePartition }} - {{- end }} + serviceName: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + updateStrategy: {{- include "common.tplvalues.render" (dict "value" .Values.updateStrategy "context" $ ) | nindent 4 }} template: metadata: - labels: {{- include "kafka.labels" . | nindent 8 }} + labels: {{- include "common.labels.standard" . | nindent 8 }} app.kubernetes.io/component: kafka {{- if .Values.podLabels }} - {{- include "kafka.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 8 }} {{- end }} - {{- if or (include "kafka.createConfigmap" .) (include "kafka.createJaasSecret" .) .Values.externalAccess.enabled (include "kafka.metrics.jmx.createConfigmap" .) .Values.podAnnotations }} annotations: {{- if (include "kafka.createConfigmap" .) }} checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} {{- end }} {{- if (include "kafka.createJaasSecret" .) }} - checksum/secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + checksum/jaas-secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createTlsSecret" .) }} + checksum/tls-secret: {{ include (print $.Template.BasePath "/tls-secrets.yaml") . | sha256sum }} {{- end }} {{- if .Values.externalAccess.enabled }} checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} @@ -57,30 +50,46 @@ spec: checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} {{- end }} {{- if .Values.podAnnotations }} - {{- include "kafka.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} {{- end }} - {{- end }} spec: -{{- include "kafka.imagePullSecrets" . | indent 6 }} + {{- include "kafka.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + hostNetwork: {{ .Values.hostNetwork }} + hostIPC: {{ .Values.hostIPC }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName | quote }} + {{- end }} {{- if .Values.affinity }} - affinity: {{- include "kafka.tplValue" ( dict "value" .Values.affinity "context" $ ) | nindent 8 }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.podAntiAffinityPreset "component" "kafka" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.nodeAffinityPreset.type "key" .Values.nodeAffinityPreset.key "values" .Values.nodeAffinityPreset.values) | nindent 10 }} {{- end }} {{- if .Values.nodeSelector }} - nodeSelector: {{- include "kafka.tplValue" ( dict "value" .Values.nodeSelector "context" $ ) | nindent 8 }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} {{- end }} {{- if .Values.tolerations }} - tolerations: {{- include "kafka.tplValue" ( dict "value" .Values.tolerations "context" $ ) | nindent 8 }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} {{- end }} {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} {{- end }} - {{- if .Values.podSecurityContext }} - securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- if .Values.podSecurityContext.enabled }} + securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }} {{- end }} - {{- if .Values.serviceAccount.create }} - serviceAccountName: {{ template "kafka.serviceAccountName" . }} - {{- end }} - {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) }} + serviceAccountName: {{ include "kafka.serviceAccountName" . 
}} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) .Values.initContainers }} initContainers: {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} - name: volume-permissions @@ -91,16 +100,23 @@ spec: args: - -ec - | - mkdir -p /bitnami/kafka - chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/kafka" - securityContext: - runAsUser: 0 + mkdir -p "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} "{{ .Values.persistence.mountPath }}" "{{ .Values.logPersistence.mountPath }}" + find "{{ .Values.persistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + find "{{ .Values.logPersistence.mountPath }}" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- if eq ( toString ( .Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.containerSecurityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.containerSecurityContext | toYaml | nindent 12 }} + {{- end }} {{- if .Values.volumePermissions.resources }} resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} {{- end }} volumeMounts: - name: data - mountPath: /bitnami/kafka + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} {{- end }} {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} - name: auto-discovery @@ -121,22 +137,36 @@ spec: volumeMounts: - name: shared mountPath: /shared + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} - name: scripts mountPath: /scripts/auto-discovery.sh subPath: auto-discovery.sh {{- end }} + {{- if .Values.initContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.initContainers "context" $ ) | nindent 8 }} + {{- end }} {{- end }} containers: - name: kafka image: {{ include "kafka.image" . 
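The volume-permissions init container now fixes ownership of both the data and log mount paths and honours a literal `runAsUser: auto`, in which case `runAsUser` is omitted from its security context so the image default (root) applies. A hedged sketch of the driving values; the log mount path shown is an assumption, not confirmed by this patch:

    persistence:
      mountPath: /bitnami/kafka
    logPersistence:
      mountPath: /opt/bitnami/kafka/logs   # assumed default; check values.yaml
    volumePermissions:
      enabled: true
      containerSecurityContext:
        runAsUser: auto                    # or a numeric UID such as 0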
}} imagePullPolicy: {{ .Values.image.pullPolicy | quote }} - command: {{- include "kafka.tplValue" (dict "value" .Values.command "context" $) | nindent 12 }} - {{- if .Values.args }} - args: {{- include "kafka.tplValue" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.args "context" $) | nindent 12 }} {{- end }} env: - name: BITNAMI_DEBUG - value: {{ ternary "true" "false" .Values.image.debug | quote }} + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} - name: MY_POD_IP valueFrom: fieldRef: @@ -147,9 +177,9 @@ spec: fieldPath: metadata.name - name: KAFKA_CFG_ZOOKEEPER_CONNECT {{- if .Values.zookeeper.enabled }} - value: {{ include "kafka.zookeeper.fullname" . | quote }} + value: {{ printf "%s%s" (include "kafka.zookeeper.fullname" .) (tpl .Values.zookeeperChrootPath .) | quote }} {{- else }} - value: {{ join "," .Values.externalZookeeper.servers | quote }} + value: {{ include "common.tplvalues.render" (dict "value" (printf "%s%s" (join "," .Values.externalZookeeper.servers) (tpl .Values.zookeeperChrootPath .)) "context" $) }} {{- end }} - name: KAFKA_INTER_BROKER_LISTENER_NAME value: {{ .Values.interBrokerListenerName | quote }} @@ -157,65 +187,60 @@ spec: {{- if .Values.listenerSecurityProtocolMap }} value: {{ .Values.listenerSecurityProtocolMap | quote }} {{- else if .Values.externalAccess.enabled }} - value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $clientProtocol }}" + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $externalClientProtocol }}" {{- else }} value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" {{- end }} - {{- if or ($clientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.jaas.zookeeperUser }} + {{- if or ($clientProtocol | regexFind "SASL") ($externalClientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.sasl.jaas.zookeeperUser }} - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS - value: {{ include "kafka.auth.saslMechanisms" ( dict "type" .Values.auth.saslMechanisms ) }} + value: {{ upper .Values.auth.sasl.mechanisms | quote }} - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL - value: {{ upper .Values.auth.saslInterBrokerMechanism | quote }} + value: {{ upper .Values.auth.sasl.interBrokerMechanism | quote }} {{- end }} - name: KAFKA_CFG_LISTENERS {{- if .Values.listeners }} - value: {{ .Values.listeners }} + value: {{ join "," .Values.listeners }} {{- else if .Values.externalAccess.enabled }} - value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + value: "INTERNAL://:{{ .Values.containerPorts.internal }},CLIENT://:{{ .Values.containerPorts.client }},EXTERNAL://:{{ .Values.containerPorts.external }}" 
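`diagnosticMode` takes precedence over `command`/`args` and, via the ternary on `BITNAMI_DEBUG` above, also forces debug logging, so a broker pod can be held idle for inspection. A minimal sketch assuming the usual Bitnami-style defaults:

    diagnosticMode:
      enabled: true
      command:
        - sleep        # replaces the container entrypoint
      args:
        - infinity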
{{- else }} - value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + value: "INTERNAL://:{{ .Values.containerPorts.internal }},CLIENT://:{{ .Values.containerPorts.client }}" {{- end }} {{- if .Values.externalAccess.enabled }} {{- if .Values.externalAccess.autoDiscovery.enabled }} - name: SHARED_FILE value: "/shared/info.txt" {{- end }} + {{- if eq .Values.externalAccess.service.type "NodePort" }} + - name: HOST_IP + valueFrom: + fieldRef: + fieldPath: status.hostIP + {{- end }} {{- else }} - name: KAFKA_CFG_ADVERTISED_LISTENERS {{- if .Values.advertisedListeners }} - value: {{ .Values.advertisedListeners }} + value: {{ join "," .Values.advertisedListeners }} {{- else }} - value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.internal }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ .Values.service.ports.client }}" {{- end }} {{- end }} - name: ALLOW_PLAINTEXT_LISTENER - value: {{ ternary "yes" "no" (or .Values.auth.enabled .Values.allowPlaintextListener) | quote }} + value: {{ ternary "yes" "no" .Values.allowPlaintextListener | quote }} {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} - name: KAFKA_OPTS value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" {{- if (include "kafka.client.saslAuthentication" .) }} - name: KAFKA_CLIENT_USERS - value: {{ join "," .Values.auth.jaas.clientUsers | quote }} + value: {{ join "," .Values.auth.sasl.jaas.clientUsers | quote }} - name: KAFKA_CLIENT_PASSWORDS valueFrom: secretKeyRef: name: {{ include "kafka.jaasSecretName" . }} key: client-passwords {{- end }} - {{- if .Values.auth.jaas.zookeeperUser }} - - name: KAFKA_ZOOKEEPER_PROTOCOL - value: "SASL" - - name: KAFKA_ZOOKEEPER_USER - value: {{ .Values.auth.jaas.zookeeperUser | quote }} - - name: KAFKA_ZOOKEEPER_PASSWORD - valueFrom: - secretKeyRef: - name: {{ include "kafka.jaasSecretName" . }} - key: zookeeper-password - {{- end }} {{- if (include "kafka.interBroker.saslAuthentication" .) }} - name: KAFKA_INTER_BROKER_USER - value: {{ .Values.auth.jaas.interBrokerUser | quote }} + value: {{ .Values.auth.sasl.jaas.interBrokerUser | quote }} - name: KAFKA_INTER_BROKER_PASSWORD valueFrom: secretKeyRef: @@ -223,18 +248,60 @@ spec: key: inter-broker-password {{- end }} {{- end }} - {{- if (include "kafka.tlsEncryption" .) }} - - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM - value: {{ .Values.auth.tlsEndpointIdentificationAlgorithm | quote }} - {{- if .Values.auth.jksPassword }} - - name: KAFKA_CERTIFICATE_PASSWORD - value: {{ .Values.auth.jksPassword | quote }} + {{- if and .Values.zookeeper.auth.client.enabled .Values.auth.sasl.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.sasl.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password {{- end }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: {{ include "kafka.zookeeper.protocol" . 
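These hunks complete the move of SASL settings from the old flat `auth.jaas.*`/`auth.sasl*` keys into a nested `auth.sasl` tree. A minimal sketch with illustrative user names:

    auth:
      sasl:
        mechanisms: plain,scram-sha-256,scram-sha-512   # upper-cased into KAFKA_CFG_SASL_ENABLED_MECHANISMS
        interBrokerMechanism: plain
        jaas:
          clientUsers:
            - user1
          interBrokerUser: admin
          zookeeperUser: zookeeperUser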
}} + {{- if .Values.auth.zookeeper.tls.enabled }} + - name: KAFKA_ZOOKEEPER_TLS_TYPE + value: {{ upper .Values.auth.zookeeper.tls.type | quote }} + - name: KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME + value: {{ .Values.auth.zookeeper.tls.verifyHostname | quote }} + {{- if .Values.auth.zookeeper.tls.passwordsSecret }} + - name: KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretKeystoreKey | quote }} + - name: KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.auth.zookeeper.tls.passwordsSecret }} + key: {{ .Values.auth.zookeeper.tls.passwordsSecretTruststoreKey | quote }} + {{- end }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: KAFKA_TLS_TYPE + value: {{ upper .Values.auth.tls.type | quote }} + - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM + value: {{ default "" .Values.auth.tls.endpointIdentificationAlgorithm | quote }} + - name: KAFKA_TLS_CLIENT_AUTH + value: {{ ternary "required" "none" (or (eq (include "kafka.externalClientProtocol" . ) "mtls") (eq .Values.auth.clientProtocol "mtls")) | quote }} + - name: KAFKA_CERTIFICATE_PASSWORD + {{- if .Values.auth.tls.existingSecret }} + valueFrom: + secretKeyRef: + name: {{ .Values.auth.tls.existingSecret }} + key: password + {{- else }} + value: {{ default "" .Values.auth.tls.password | quote }} + {{- end }} {{- end }} {{- if .Values.metrics.jmx.enabled }} - name: JMX_PORT value: "5555" {{- end }} + - name: KAFKA_VOLUME_DIR + value: {{ .Values.persistence.mountPath | quote }} + - name: KAFKA_LOG_DIR + value: {{ .Values.logPersistence.mountPath | quote }} - name: KAFKA_CFG_DELETE_TOPIC_ENABLE value: {{ .Values.deleteTopicEnable | quote }} - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE @@ -242,12 +309,12 @@ spec: - name: KAFKA_HEAP_OPTS value: {{ .Values.heapOpts | quote }} - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES - value: {{ .Values.logFlushIntervalMessages | quote }} + value: {{ .Values.logFlushIntervalMessages | replace "_" "" | quote }} - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS value: {{ .Values.logFlushIntervalMs | quote }} - name: KAFKA_CFG_LOG_RETENTION_BYTES value: {{ .Values.logRetentionBytes | replace "_" "" | quote }} - - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVAL_MS value: {{ .Values.logRetentionCheckIntervalMs | quote }} - name: KAFKA_CFG_LOG_RETENTION_HOURS value: {{ .Values.logRetentionHours | quote }} @@ -281,56 +348,77 @@ spec: value: {{ .Values.socketSendBufferBytes | quote }} - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS value: {{ .Values.zookeeperConnectionTimeoutMs | quote }} + - name: KAFKA_CFG_AUTHORIZER_CLASS_NAME + value: {{ .Values.authorizerClassName | quote }} + - name: KAFKA_CFG_ALLOW_EVERYONE_IF_NO_ACL_FOUND + value: {{ .Values.allowEveryoneIfNoAclFound | quote }} + - name: KAFKA_CFG_SUPER_USERS + value: {{ .Values.superUsers | quote }} {{- if .Values.extraEnvVars }} - {{ include "kafka.tplValue" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} {{- end }} + {{- if or .Values.extraEnvVarsCM .Values.extraEnvVarsSecret }} + envFrom: + {{- if .Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.extraEnvVarsSecret }} + - secretRef: + name: {{ 
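A sketch of the values feeding the new ZooKeeper and Kafka TLS environment variables above; secret names and keys are placeholders, and the `existingSecret` is expected to expose the keystore password under a `password` key per the `secretKeyRef` above:

    auth:
      zookeeper:
        tls:
          enabled: true
          type: jks                    # upper-cased into KAFKA_ZOOKEEPER_TLS_TYPE
          verifyHostname: true
          passwordsSecret: zk-tls-passwords
          passwordsSecretKeystoreKey: keystore-password
          passwordsSecretTruststoreKey: truststore-password
      tls:
        type: jks
        endpointIdentificationAlgorithm: https
        existingSecret: kafka-cert-passwords   # must contain the key "password"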
include "common.tplvalues.render" (dict "value" .Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- end }} ports: - name: kafka-client - containerPort: 9092 + containerPort: {{ .Values.containerPorts.client }} - name: kafka-internal - containerPort: {{ $interBrokerPort }} + containerPort: {{ .Values.containerPorts.internal }} {{- if .Values.externalAccess.enabled }} - name: kafka-external - containerPort: 9094 + containerPort: {{ .Values.containerPorts.external }} {{- end }} - {{- if .Values.livenessProbe.enabled }} - livenessProbe: + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.livenessProbe "enabled") "context" $) | nindent 12 }} tcpSocket: port: kafka-client - initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} - timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.livenessProbe.failureThreshold }} - periodSeconds: {{ .Values.livenessProbe.periodSeconds }} - successThreshold: {{ .Values.livenessProbe.successThreshold }} - {{- else if .Values.customLivenessProbe }} - livenessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customlivenessProbe "context" $) | nindent 12 }} {{- end }} - {{- if .Values.readinessProbe.enabled }} - readinessProbe: + {{- if .Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.readinessProbe "enabled") "context" $) | nindent 12 }} tcpSocket: port: kafka-client - initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} - timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.readinessProbe.failureThreshold }} - periodSeconds: {{ .Values.readinessProbe.periodSeconds }} - successThreshold: {{ .Values.readinessProbe.successThreshold }} - {{- else if .Values.customReadinessProbe }} - readinessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customreadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: kafka-client + {{- end }} + {{- end }} + {{- if .Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.lifecycleHooks "context" $) | nindent 12 }} {{- end }} {{- if .Values.resources }} resources: {{- toYaml .Values.resources | nindent 12 }} {{- end }} volumeMounts: - name: data - mountPath: /bitnami/kafka + mountPath: {{ .Values.persistence.mountPath }} + - name: logs + mountPath: {{ .Values.logPersistence.mountPath }} {{- if or .Values.config .Values.existingConfigmap }} - name: kafka-config - mountPath: /bitnami/kafka/config/server.properties + mountPath: {{ .Values.persistence.mountPath }}/config/server.properties subPath: server.properties {{- end }} {{- if or .Values.log4j .Values.existingLog4jConfigMap }} - 
name: log4j-config - mountPath: /bitnami/kafka/config/log4j.properties + mountPath: {{ .Values.persistence.mountPath }}/config/log4j.properties subPath: log4j.properties {{- end }} - name: scripts @@ -341,30 +429,57 @@ spec: mountPath: /shared {{- end }} {{- if (include "kafka.tlsEncryption" .) }} - - name: kafka-certificates - mountPath: /certs + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $_ := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} readOnly: true {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + mountPath: /certs-{{ $index }} + readOnly: true + {{- end }} + {{- end }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + mountPath: /kafka-zookeeper-cert + readOnly: true + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + mountPath: /truststore + readOnly: true + {{- end }} + {{- end }} {{- if .Values.extraVolumeMounts }} - {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumeMounts "context" $) | nindent 12 }} {{- end }} {{- if .Values.metrics.jmx.enabled }} - name: jmx-exporter - image: {{ template "kafka.metrics.jmx.image" . }} + image: {{ include "kafka.metrics.jmx.image" . }} imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + {{- if .Values.metrics.jmx.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.metrics.jmx.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else }} command: - java - - -XX:+UnlockExperimentalVMOptions - - -XX:+UseCGroupMemoryLimitForHeap - - -XX:MaxRAMFraction=1 + args: + - -XX:MaxRAMPercentage=100 - -XshowSettings:vm - -jar - jmx_prometheus_httpserver.jar - "5556" - /etc/jmx-kafka/jmx-kafka-prometheus.yml + {{- end }} ports: - name: metrics - containerPort: 5556 + containerPort: {{ .Values.metrics.jmx.containerPorts.metrics }} {{- if .Values.metrics.jmx.resources }} resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} {{- end }} @@ -373,7 +488,7 @@ spec: mountPath: /etc/jmx-kafka {{- end }} {{- if .Values.sidecars }} - {{- include "kafka.tplValue" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- include "common.tplvalues.render" (dict "value" .Values.sidecars "context" $) | nindent 8 }} {{- end }} volumes: {{- if or .Values.config .Values.existingConfigmap }} @@ -388,7 +503,7 @@ spec: {{ end }} - name: scripts configMap: - name: {{ include "kafka.fullname" . }}-scripts + name: {{ include "common.names.fullname" . }}-scripts defaultMode: 0755 {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} - name: shared @@ -400,13 +515,36 @@ spec: name: {{ include "kafka.metrics.jmx.configmapName" . }} {{- end }} {{- if (include "kafka.tlsEncryption" .) }} - - name: kafka-certificates + {{- if not (empty .Values.auth.tls.existingSecrets) }} + {{- range $index, $secret := .Values.auth.tls.existingSecrets }} + - name: kafka-certs-{{ $index }} secret: - secretName: {{ include "kafka.jksSecretName" . 
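Each broker now mounts its own certificate volume at `/certs-<ordinal>`, taken either from a user-supplied secret list or from the auto-generated per-replica secrets. Illustrative values (secret names are placeholders; `tpl` is applied to each entry in the volumes hunk below):

    auth:
      tls:
        existingSecrets:       # length should match replicaCount
          - kafka-0-tls
          - kafka-1-tls
          - kafka-2-tls
        # jksTruststoreSecret: kafka-truststore   # optional shared truststore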
}} + secretName: {{ tpl $secret $ }} defaultMode: 256 {{- end }} + {{- else if .Values.auth.tls.autoGenerated }} + {{- range $index := until $replicaCount }} + - name: kafka-certs-{{ $index }} + secret: + secretName: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $index }} + defaultMode: 256 + {{- end }} + {{- end }} + {{- if and .Values.auth.zookeeper.tls.enabled .Values.auth.zookeeper.tls.existingSecret }} + - name: kafka-zookeeper-cert + secret: + secretName: {{ .Values.auth.zookeeper.tls.existingSecret }} + defaultMode: 256 + {{- end }} + {{- if .Values.auth.tls.jksTruststoreSecret }} + - name: kafka-truststore + secret: + secretName: {{ .Values.auth.tls.jksTruststoreSecret }} + defaultMode: 256 + {{- end }} + {{- end }} {{- if .Values.extraVolumes }} - {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraVolumes "context" $) | nindent 8 }} {{- end }} {{- if not .Values.persistence.enabled }} - name: data @@ -415,12 +553,26 @@ spec: - name: data persistentVolumeClaim: claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} -{{- else }} +{{- end }} +{{- if not .Values.logPersistence.enabled }} + - name: logs + emptyDir: {} +{{- else if .Values.logPersistence.existingClaim }} + - name: logs + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.logPersistence.existingClaim .) }} +{{- end }} + {{- if or (and .Values.persistence.enabled (not .Values.persistence.existingClaim)) (and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim)) }} volumeClaimTemplates: + {{- end }} +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} - metadata: name: data {{- if .Values.persistence.annotations }} - annotations: {{- include "kafka.tplValue" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + {{- if .Values.persistence.labels }} + labels: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.labels "context" $) | nindent 10 }} {{- end }} spec: accessModes: @@ -431,5 +583,26 @@ spec: requests: storage: {{ .Values.persistence.size | quote }} {{ include "kafka.storageClass" . | nindent 8 }} + {{- if .Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.persistence.selector "context" $) | nindent 10 }} + {{- end -}} {{- end }} +{{- if and .Values.logPersistence.enabled (not .Values.logPersistence.existingClaim) }} + - metadata: + name: logs + {{- if .Values.logPersistence.annotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.logPersistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.logPersistence.size | quote }} + {{ include "kafka.storageClass" . 
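The new `logs` volume mirrors the data-volume logic: `emptyDir` when persistence is disabled, an existing claim when one is supplied, otherwise a `volumeClaimTemplate`. A sketch with an illustrative size:

    logPersistence:
      enabled: true
      accessModes:
        - ReadWriteOnce
      size: 8Gi
      # existingClaim: my-kafka-logs   # skips the volumeClaimTemplate when set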
| nindent 8 }} + {{- if .Values.logPersistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" .Values.logPersistence.selector "context" $) | nindent 10 }} + {{- end -}} {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml b/scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml old mode 100755 new mode 100644 index eefe0046d..8d77a4710 --- a/scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml @@ -1,50 +1,61 @@ {{- if .Values.externalAccess.enabled }} -{{- $fullName := include "kafka.fullname" . }} +{{- $fullName := include "common.names.fullname" . }} {{- $replicaCount := .Values.replicaCount | int }} {{- $root := . }} {{- range $i, $e := until $replicaCount }} {{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} ---- +{{- $_ := set $ "targetPod" $targetPod }} apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" $ }}-{{ $i }}-external - labels: {{- include "kafka.labels" $ | nindent 4 }} + name: {{ printf "%s-%d-external" (include "common.names.fullname" $) $i | trunc 63 | trimSuffix "-" }} + namespace: {{ $root.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} app.kubernetes.io/component: kafka pod: {{ $targetPod }} {{- if $root.Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} - {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + {{- if $root.Values.externalAccess.service.labels }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.labels "context" $) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations $root.Values.externalAccess.service.loadBalancerAnnotations }} annotations: + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $root.Values.externalAccess.service.loadBalancerAnnotations) $replicaCount) }} + {{ include "common.tplvalues.render" ( dict "value" (index $root.Values.externalAccess.service.loadBalancerAnnotations $i) "context" $) | nindent 4 }} + {{- end }} {{- if $root.Values.externalAccess.service.annotations }} - {{ include "kafka.tplValue" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} {{- end }} {{- if $root.Values.commonAnnotations }} - {{- include "kafka.tplValue" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} {{- end }} spec: type: {{ $root.Values.externalAccess.service.type }} {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} - {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + {{- if and (not (empty $root.Values.externalAccess.service.loadBalancerIPs)) (eq (len $root.Values.externalAccess.service.loadBalancerIPs) $replicaCount) }} loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} {{- end }} {{- if 
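The per-broker external services now index `loadBalancerAnnotations`, like `loadBalancerIPs`, by pod ordinal, and both lists are ignored unless their length equals `replicaCount`. Illustrative values for a three-broker cluster (IPs are placeholders):

    replicaCount: 3
    externalAccess:
      enabled: true
      service:
        type: LoadBalancer
        ports:
          external: 9094
        loadBalancerIPs:
          - 203.0.113.10
          - 203.0.113.11
          - 203.0.113.12
        loadBalancerAnnotations:   # one map per broker
          - {}
          - {}
          - {}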
$root.Values.externalAccess.service.loadBalancerSourceRanges }} loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} {{- end }} {{- end }} + publishNotReadyAddresses: {{ $root.Values.externalAccess.service.publishNotReadyAddresses }} ports: - name: tcp-kafka - port: {{ $root.Values.externalAccess.service.port }} + port: {{ $root.Values.externalAccess.service.ports.external }} {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} {{- else }} nodePort: null {{- end }} targetPort: kafka-external - selector: {{- include "kafka.matchLabels" $ | nindent 4 }} + {{- if $root.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $root.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" $ | nindent 4 }} app.kubernetes.io/component: kafka statefulset.kubernetes.io/pod-name: {{ $targetPod }} --- diff --git a/scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml b/scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml old mode 100755 new mode 100644 index e7c2e5e6e..af462126a --- a/scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml @@ -1,26 +1,37 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }}-headless - labels: {{- include "kafka.labels" . | nindent 4 }} + name: {{ printf "%s-headless" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka - {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.service.headless.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.service.headless.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.headless.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.headless.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} spec: type: ClusterIP clusterIP: None + publishNotReadyAddresses: {{ .Values.service.headless.publishNotReadyAddresses }} ports: - name: tcp-client - port: {{ .Values.service.port }} + port: {{ .Values.service.ports.client }} protocol: TCP targetPort: kafka-client - name: tcp-internal - port: {{ .Values.service.internalPort }} + port: {{ .Values.service.ports.internal }} protocol: TCP targetPort: kafka-internal - selector: {{- include "kafka.matchLabels" . | nindent 4 }} + selector: {{- include "common.labels.matchLabels" . 
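The headless service gains its own label and annotation overrides plus `publishNotReadyAddresses`, which lets brokers resolve one another before they pass readiness. Illustrative values; the port numbers are the chart's previous defaults and assumed unchanged:

    service:
      headless:
        publishNotReadyAddresses: true
        labels: {}
        annotations: {}
      ports:
        client: 9092
        internal: 9093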
| nindent 4 }} app.kubernetes.io/component: kafka diff --git a/scripts/helmcharts/databases/charts/kafka/templates/svc.yaml b/scripts/helmcharts/databases/charts/kafka/templates/svc.yaml old mode 100755 new mode 100644 index 189cb9ffd..8e0472a1d --- a/scripts/helmcharts/databases/charts/kafka/templates/svc.yaml +++ b/scripts/helmcharts/databases/charts/kafka/templates/svc.yaml @@ -1,34 +1,45 @@ apiVersion: v1 kind: Service metadata: - name: {{ template "kafka.fullname" . }} - labels: {{- include "kafka.labels" . | nindent 4 }} + name: {{ template "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} app.kubernetes.io/component: kafka {{- if .Values.commonLabels }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} {{- end }} {{- if or .Values.service.annotations .Values.commonAnnotations }} annotations: {{- if .Values.service.annotations }} - {{ include "kafka.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{ include "common.tplvalues.render" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} {{- end }} {{- if .Values.commonAnnotations }} - {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} {{- end }} spec: type: {{ .Values.service.type }} - {{- if eq .Values.service.type "LoadBalancer" }} - {{- if .Values.service.loadBalancerIP }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{ .Values.service.loadBalancerSourceRanges }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} loadBalancerIP: {{ .Values.service.loadBalancerIP }} {{- end }} - {{- if .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} {{- end }} ports: - name: tcp-client - port: {{ .Values.service.port }} + port: {{ .Values.service.ports.client }} protocol: TCP targetPort: kafka-client {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} @@ -36,14 +47,17 @@ spec: {{- else if eq .Values.service.type "ClusterIP" }} nodePort: null {{- end }} - {{- if and .Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} + {{- if .Values.externalAccess.enabled }} - name: tcp-external - port: {{ .Values.service.externalPort }} + port: {{ .Values.service.ports.external }} protocol: TCP 
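The client service picks up several standard Service fields in this hunk; `externalTrafficPolicy` is only rendered for the LoadBalancer and NodePort types. An illustrative combination:

    service:
      type: LoadBalancer
      externalTrafficPolicy: Local
      loadBalancerSourceRanges:
        - 10.10.10.0/24
      sessionAffinity: ClientIP
      sessionAffinityConfig:
        clientIP:
          timeoutSeconds: 300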
targetPort: kafka-external {{- if (not (empty .Values.service.nodePorts.external)) }} nodePort: {{ .Values.service.nodePorts.external }} {{- end }} {{- end }} - selector: {{- include "kafka.matchLabels" . | nindent 4 }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} app.kubernetes.io/component: kafka diff --git a/scripts/helmcharts/databases/charts/kafka/templates/tls-secrets.yaml b/scripts/helmcharts/databases/charts/kafka/templates/tls-secrets.yaml new file mode 100644 index 000000000..d6b1adc28 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/tls-secrets.yaml @@ -0,0 +1,31 @@ +{{- if (include "kafka.createTlsSecret" .) }} +{{- $replicaCount := int .Values.replicaCount }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $fullname := include "common.names.fullname" . }} +{{- $ca := genCA "kafka-ca" 365 }} +{{- range $i := until $replicaCount }} +{{- $secretName := printf "%s-%d-tls" (include "common.names.fullname" $) $i }} +{{- $replicaHost := printf "%s-%d.%s-headless" $fullname $i $fullname }} +{{- $altNames := list (printf "%s.%s.svc.%s" $replicaHost $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s" $replicaHost $releaseNamespace) (printf "%s.%s" $fullname $releaseNamespace) $replicaHost $fullname }} +{{- $cert := genSignedCert $replicaHost nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-%d-tls" (include "common.names.fullname" $) $i }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +--- +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/values-production.yaml b/scripts/helmcharts/databases/charts/kafka/values-production.yaml deleted file mode 100755 index af6f43dba..000000000 --- a/scripts/helmcharts/databases/charts/kafka/values-production.yaml +++ /dev/null @@ -1,931 +0,0 @@ -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets -## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass - -## Bitnami Kafka image version -## ref: https://hub.docker.com/r/bitnami/kafka/tags/ -## -image: - registry: docker.io - repository: bitnami/kafka - tag: 2.6.0-debian-10-r30 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', 
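With no `auth.tls.existingSecrets` supplied and auto-generation enabled, this new template mints one `kubernetes.io/tls` secret per broker signed by a chart-local CA, and `common.secrets.lookup` re-uses the stored material on upgrade instead of regenerating it. A minimal enabling sketch; treating `pem` as the matching `auth.tls.type` is an assumption based on the `tls.crt`/`tls.key` layout above:

    auth:
      clientProtocol: tls
      interBrokerProtocol: tls
      tls:
        autoGenerated: true
        type: pem        # assumption: matches the PEM-format secret data above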
else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - - ## Set to true if you would like to see extra information on logs - ## - debug: false - -## String to partially override kafka.fullname template (will maintain the release name) -## -# nameOverride: - -## String to fully override kafka.fullname template -## -# fullnameOverride: - -## Kubernetes Cluster Domain -## -clusterDomain: cluster.local - -## Add labels to all the deployed resources -## -commonLabels: {} - -## Add annotations to all the deployed resources -## -commonAnnotations: {} - -## Kafka Configuration -## Specify content for server.properties -## The server.properties is auto-generated based on other parameters when this paremeter is not specified -## -## Example: -## config: |- -## broker.id=-1 -## listeners=PLAINTEXT://:9092 -## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 -## num.network.threads=3 -## num.io.threads=8 -## socket.send.buffer.bytes=102400 -## socket.receive.buffer.bytes=102400 -## socket.request.max.bytes=104857600 -## log.dirs=/bitnami/kafka/data -## num.partitions=1 -## num.recovery.threads.per.data.dir=1 -## offsets.topic.replication.factor=1 -## transaction.state.log.replication.factor=1 -## transaction.state.log.min.isr=1 -## log.flush.interval.messages=10000 -## log.flush.interval.ms=1000 -## log.retention.hours=168 -## log.retention.bytes=1073741824 -## log.segment.bytes=1073741824 -## log.retention.check.interval.ms=300000 -## zookeeper.connect=ZOOKEEPER_SERVICE_NAME -## zookeeper.connection.timeout.ms=6000 -## group.initial.rebalance.delay.ms=0 -## -# config: - -## ConfigMap with Kafka Configuration -## NOTE: This will override config -## -# existingConfigmap: - -## Kafka Log4J Configuration -## An optional log4j.properties file to overwrite the default of the Kafka brokers. -## See an example log4j.properties at: -## https://github.com/apache/kafka/blob/trunk/config/log4j.properties -## -# log4j: - -## Kafka Log4j ConfigMap -## The name of an existing ConfigMap containing a log4j.properties file. -## NOTE: this will override log4j. -## -# existingLog4jConfigMap: - -## Kafka's Java Heap size -## -heapOpts: -Xmx1024m -Xms1024m - -## Switch to enable topic deletion or not. -## -deleteTopicEnable: false - -## Switch to enable auto creation of topics. -## Enabling auto creation of topics not recommended for production or similar environments. -## -autoCreateTopicsEnable: false - -## The number of messages to accept before forcing a flush of data to disk. -## -logFlushIntervalMessages: 10000 - -## The maximum amount of time a message can sit in a log before we force a flush. -## -logFlushIntervalMs: 1000 - -## A size-based retention policy for logs. -## -logRetentionBytes: _1073741824 - -## The interval at which log segments are checked to see if they can be deleted. -## -logRetentionCheckIntervalMs: 300000 - -## The minimum age of a log file to be eligible for deletion due to age. -## -logRetentionHours: 168 - -## The maximum size of a log segment file. When this size is reached a new log segment will be created. -## -logSegmentBytes: _1073741824 - -## A comma separated list of directories under which to store log files. 
-## -logsDirs: /bitnami/kafka/data - -## The largest record batch size allowed by Kafka -## -maxMessageBytes: _1000012 - -## Default replication factors for automatically created topics -## -defaultReplicationFactor: 3 - -## The replication factor for the offsets topic -## -offsetsTopicReplicationFactor: 3 - -## The replication factor for the transaction topic -## -transactionStateLogReplicationFactor: 3 - -## Overridden min.insync.replicas config for the transaction topic -## -transactionStateLogMinIsr: 3 - -## The number of threads doing disk I/O. -## -numIoThreads: 8 - -## The number of threads handling network requests. -## -numNetworkThreads: 3 - -## The default number of log partitions per topic. -## -numPartitions: 1 - -## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -## -numRecoveryThreadsPerDataDir: 1 - -## The receive buffer (SO_RCVBUF) used by the socket server. -## -socketReceiveBufferBytes: 102400 - -## The maximum size of a request that the socket server will accept (protection against OOM). -## -socketRequestMaxBytes: _104857600 - -## The send buffer (SO_SNDBUF) used by the socket server. -## -socketSendBufferBytes: 102400 - -## Timeout in ms for connecting to zookeeper. -## -zookeeperConnectionTimeoutMs: 6000 - -## Command and args for running the container. Use array form -## -command: - - /scripts/setup.sh -args: - -## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} -## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration -## Example: -## extraEnvVars: -## - name: KAFKA_CFG_BACKGROUND_THREADS -## value: "10" -## -extraEnvVars: [] - -## extraVolumes and extraVolumeMounts allows you to mount other volumes -## Examples: -# extraVolumes: -# - name: kafka-jaas -# secret: -# secretName: kafka-jaas -# extraVolumeMounts: -# - name: kafka-jaas -# mountPath: /bitnami/kafka/config/kafka_jaas.conf -# subPath: kafka_jaas.conf -extraVolumes: [] -extraVolumeMounts: [] - -## Extra objects to deploy (value evaluated as a template) -## -extraDeploy: [] - -## Authentication parameteres -## https://github.com/bitnami/bitnami-docker-kafka#security -## -auth: - ## Authentication protocol for client and inter-broker communications - ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' - ## This table shows the security provided on each protocol: - ## | Method | Authentication | Encryption via TLS | - ## | plaintext | None | No | - ## | tls | None | Yes | - ## | mtls | Yes (two-way authentication) | Yes | - ## | sasl | Yes (via SASL) | No | - ## | sasl_tls | Yes (via SASL) | Yes | - ## - clientProtocol: sasl - interBrokerProtocol: sasl - - ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls - ## - saslMechanisms: plain,scram-sha-256,scram-sha-512 - ## SASL mechanism for inter broker communication - ## - saslInterBrokerMechanism: plain - - ## Name of the existing secret containing the truststore and - ## one keystore per Kafka broker you have in the Kafka cluster. - ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. - ## Create this secret following the steps below: - ## 1) Generate your trustore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh - ## 2) Rename your truststore to `kafka.truststore.jks`. 
- ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. - ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: - ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... - ## Alternatively, you can put your JKS files under the files/jks directory - ## - # jksSecret: - - ## Password to access the JKS files when they are password-protected. - ## - # jksPassword: - - ## The endpoint identification algorithm used by clients to validate server host name. - ## Disable server host name verification by setting it to an empty string - ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings - ## - tlsEndpointIdentificationAlgorithm: https - - ## JAAS configuration for SASL authentication - ## MANDATORY when method is 'sasl', or 'sasl_tls' - ## - jaas: - ## Kafka client user list - ## - ## clientUsers: - ## - user1 - ## - user2 - ## - clientUsers: - - user - - ## Kafka client passwords - ## - ## clientPasswords: - ## - password1 - ## - password2 - ## - clientPasswords: [] - - ## Kafka inter broker communication user - ## - interBrokerUser: admin - - ## Kafka inter broker communication password - ## - interBrokerPassword: "" - - ## Kafka Zookeeper user - ## - zookeeperUser: zookeeperUser - - ## Kafka Zookeeper password - ## - zookeeperPassword: zookeeperPassword - - ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. - ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: - ## kubectl create secret generic SECRET_NAME --from-literal=client-password=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD - ## - # existingSecret: - -## The address(es) the socket server listens on. -## When it's set to an empty array, the listeners will be configured -## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) -## -listeners: [] - -## The address(es) (hostname:port) the brokers will advertise to producers and consumers. -## When it's set to an empty array, the advertised listeners will be configured -## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) -## -advertisedListeners: [] - -## The listener->protocol mapping -## When it's nil, the listeners will be configured -## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) -## -# listenerSecurityProtocolMap: - -## Allow to use the PLAINTEXT listener. -## -allowPlaintextListener: false - -## Name of listener used for communication between brokers. -## -interBrokerListenerName: INTERNAL - -## Number of Kafka brokers to deploy -## -replicaCount: 3 - -## StrategyType, can be set to RollingUpdate or OnDelete by default. -## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets -## -updateStrategy: RollingUpdate - -## Partition update strategy -## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions -## -# rollingUpdatePartition: - -## Pod labels. Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -podLabels: {} - -## Pod annotations. 
Evaluated as a template -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -podAnnotations: {} - -## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand -## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ -priorityClassName: "" - -## Affinity for pod assignment. Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Node labels for pod assignment. Evaluated as a template -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} - -## Tolerations for pod assignment. Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Kafka pods' Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## -podSecurityContext: - fsGroup: 1001 - runAsUser: 1001 - -## Kafka containers' Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## Example: -## containerSecurityContext: -## capabilities: -## drop: ["NET_RAW"] -## readOnlyRootFilesystem: true -## -containerSecurityContext: {} - -## Kafka containers' resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 250m - # memory: 1Gi - requests: {} - # cpu: 250m - # memory: 256Mi - -## Kafka containers' liveness and readiness probes. Evaluated as a template. -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes -## -livenessProbe: - tcpSocket: - port: kafka-client - initialDelaySeconds: 10 - timeoutSeconds: 5 - # failureThreshold: 3 - # periodSeconds: 10 - # successThreshold: 1 -readinessProbe: - tcpSocket: - port: kafka-client - initialDelaySeconds: 5 - failureThreshold: 6 - timeoutSeconds: 5 - # periodSeconds: 10 - # successThreshold: 1 - -## Pod Disruption Budget configuration -## The PDB will only be created if replicaCount is greater than 1 -## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions -## -pdb: - create: true - ## Min number of pods that must still be available after the eviction - ## - # minAvailable: 1 - ## Max number of pods that can be unavailable after the eviction - ## - maxUnavailable: 1 - -## Add sidecars to the pod. -## Example: -## sidecars: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -sidecars: {} - -## Service parameters -## -service: - ## Service type - ## - type: ClusterIP - ## Kafka port for client connections - ## - port: 9092 - ## Kafka port for inter-broker connections - ## - internalPort: 9093 - ## Kafka port for external connections - ## - externalPort: 9094 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePorts: - client: "" - external: "" - ## Set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - ## Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## Provide any additional annotations which may be required. Evaluated as a template - ## - annotations: {} - -## External Access to Kafka brokers configuration -## -externalAccess: - ## Enable Kubernetes external cluster access to Kafka brokers - ## - enabled: false - - ## External IPs auto-discovery configuration - ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API - ## Note: RBAC might be required - ## - autoDiscovery: - ## Enable external IP/ports auto-discovery - ## - enabled: false - ## Bitnami Kubectl image - ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ - ## - image: - registry: docker.io - repository: bitnami/kubectl - tag: 1.17.12-debian-10-r3 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init Container resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 100m - # memory: 128Mi - requests: {} - # cpu: 100m - # memory: 128Mi - - ## Parameters to configure K8s service(s) used to externally access Kafka brokers - ## A new service per broker will be created - ## - service: - ## Service type. Allowed values: LoadBalancer or NodePort - ## - type: LoadBalancer - ## Port used when service type is LoadBalancer - ## - port: 9094 - ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount - ## Example: - ## loadBalancerIPs: - ## - X.X.X.X - ## - Y.Y.Y.Y - ## - loadBalancerIPs: [] - ## Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount - ## Example: - ## nodePorts: - ## - 30001 - ## - 30002 - ## - nodePorts: [] - ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. 
- ## If not specified, the container will try to get the kubernetes node external IP - ## - # domain: mydomain.com - ## Provide any additional annotations which may be required. Evaluated as a template - ## - annotations: {} - -## Persistence paramaters -## -persistence: - enabled: true - ## A manually managed Persistent Volume and Claim - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template - ## - # existingClaim: - ## PV Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. - ## - # storageClass: "-" - ## PV Access Mode - ## - accessModes: - - ReadWriteOnce - ## PVC size - ## - size: 8Gi - ## PVC annotations - ## - annotations: {} - -## Init Container paramaters -## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component -## values from the securityContext section of the component -## -volumePermissions: - enabled: false - ## Bitnami Minideb image - ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ - ## - image: - registry: docker.io - repository: bitnami/minideb - tag: buster - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init Container resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 100m - # memory: 128Mi - requests: {} - # cpu: 100m - # memory: 128Mi - -## Kafka pods ServiceAccount -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## -serviceAccount: - ## Specifies whether a ServiceAccount should be created - ## - create: true - ## The name of the ServiceAccount to use. 
- ## If not set and create is true, a name is generated using the fluentd.fullname template - ## - # name: - -## Role Based Access -## ref: https://kubernetes.io/docs/admin/authorization/rbac/ -## -rbac: - ## Specifies whether RBAC rules should be created - ## binding Kafka ServiceAccount to a role - ## that allows Kafka pods querying the K8s API - ## - create: false - -## Prometheus Exporters / Metrics -## -metrics: - ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter - ## - kafka: - enabled: true - - ## Bitnami Kafka exporter image - ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ - ## - image: - registry: docker.io - repository: bitnami/kafka-exporter - tag: 1.2.0-debian-10-r220 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - - ## Extra flags to be passed to Kafka exporter - ## Example: - ## extraFlags: - ## tls.insecure-skip-tls-verify: "" - ## web.telemetry-path: "/metrics" - ## - extraFlags: {} - - ## Name of the existing secret containing the optional certificate and key files - ## for Kafka Exporter client authentication - ## - # certificatesSecret: - - ## Prometheus Kafka Exporter' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 100m - # memory: 128Mi - requests: {} - # cpu: 100m - # memory: 128Mi - - ## Service configuration - ## - service: - ## Kafka Exporter Service type - ## - type: ClusterIP - ## Kafka Exporter Prometheus port - ## - port: 9308 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: "" - ## Set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - ## Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## Set the Cluster IP to use - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address - ## - # clusterIP: None - ## Annotations for the Kafka Exporter Prometheus metrics service - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" - prometheus.io/path: "/metrics" - - ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics - ## - jmx: - enabled: true - - ## Bitnami JMX exporter image - ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ - ## - image: - registry: docker.io - repository: bitnami/jmx-exporter - tag: 0.14.0-debian-10-r15 - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - - ## Prometheus JMX Exporter' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 100m - # memory: 128Mi - requests: {} - # cpu: 100m - # memory: 128Mi - - ## Service configuration - ## - service: - ## JMX Exporter Service type - ## - type: ClusterIP - ## JMX Exporter Prometheus port - ## - port: 5556 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: "" - ## Set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - ## Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## Set the Cluster IP to use - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address - ## - # clusterIP: None - ## Annotations for the JMX Exporter Prometheus metrics service - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" - prometheus.io/path: "/" - - ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted - ## values will be exposed via JMX Exporter. They must also be exposed via Rules. 
To expose all metrics - ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` - ## (2) commented out above `overrideConfig`. - ## - whitelistObjectNames: - - kafka.controller:* - - kafka.server:* - - java.lang:* - - kafka.network:* - - kafka.log:* - - ## Prometheus JMX exporter configuration - ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template - ## - ## Credits to the incubator/kafka chart for the JMX configuration. - ## https://github.com/helm/charts/tree/master/incubator/kafka - ## - config: |- - jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi - lowercaseOutputName: true - lowercaseOutputLabelNames: true - ssl: false - {{- if .Values.metrics.jmx.whitelistObjectNames }} - whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] - {{- end }} - - ## ConfigMap with Prometheus JMX exporter configuration - ## NOTE: This will override metrics.jmx.config - ## - # existingConfigmap: - - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - enabled: false - ## Namespace in which Prometheus is running - ## - # namespace: monitoring - - ## Interval at which metrics should be scraped. - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # interval: 10s - - ## Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## - # scrapeTimeout: 10s - - ## ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration - ## - # selector: - # prometheus: my-prometheus - -## -## Zookeeper chart configuration -## -## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml -## -zookeeper: - enabled: true - auth: - ## Enable Zookeeper auth - ## - enabled: true - ## User that will use Zookeeper clients to auth - ## - clientUser: zookeeperUser - ## Password that will use Zookeeper clients to auth - ## - clientPassword: zookeeperPassword - ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" - ## - serverUsers: zookeeperUser - ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" - ## - serverPasswords: zookeeperPassword - metrics: - enabled: true - -## This value is only used when zookeeper.enabled is set to false -## -externalZookeeper: - ## Server or list of external zookeeper servers to use. 
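The whitelist-plus-rules mechanism described above carries over unchanged to the updated chart, where it lives under `metrics.jmx`; narrowing the exposed MBeans is therefore a values-only change. A minimal sketch (the bean patterns are illustrative, not defaults from this patch):

metrics:
  jmx:
    enabled: true
    whitelistObjectNames:
      - kafka.server:type=BrokerTopicMetrics,*
      - kafka.controller:*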
- ## - servers: [] diff --git a/scripts/helmcharts/databases/charts/kafka/values.yaml b/scripts/helmcharts/databases/charts/kafka/values.yaml old mode 100755 new mode 100644 index 154d71bd5..1245bdb65 --- a/scripts/helmcharts/databases/charts/kafka/values.yaml +++ b/scripts/helmcharts/databases/charts/kafka/values.yaml @@ -1,62 +1,97 @@ +## @section Global parameters ## Global Docker image parameters ## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) ## -# global: -# imageRegistry: myRegistryName -# imagePullSecrets: -# - myRegistryKeySecretName -# storageClass: myStorageClass +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: cluster.local +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity + +## @section Kafka parameters ## Bitnami Kafka image version ## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## @param image.registry Kafka image registry +## @param image.repository Kafka image repository +## @param image.tag Kafka image tag (immutable tags are recommended) +## @param image.digest Kafka image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy Kafka image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set ## image: registry: docker.io repository: bitnami/kafka - tag: 2.6.0-debian-10-r30 + tag: 3.3.2-debian-11-r0 + digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: + ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] - ## Set to true if you would like to see extra information on logs ## debug: false - -## String to partially override kafka.fullname template (will maintain the release name) -## -# nameOverride: - -## String to fully override kafka.fullname template -## -# fullnameOverride: - -## Kubernetes Cluster Domain -## -clusterDomain: cluster.local - -## Add labels to all the deployed resources -## -commonLabels: {} - -## Add annotations to all the deployed resources -## -commonAnnotations: {} - -## Kafka Configuration +## @param config Configuration file for Kafka. Auto-generated based on other parameters when not specified ## Specify content for server.properties -## The server.properties is auto-generated based on other parameters when this paremeter is not specified -## -## Example: +## NOTE: This will override any KAFKA_CFG_ environment variables (including those set by the chart) +## The server.properties is auto-generated based on other parameters when this parameter is not specified +## e.g: ## config: |- ## broker.id=-1 ## listeners=PLAINTEXT://:9092 @@ -82,157 +117,108 @@ commonAnnotations: {} ## zookeeper.connection.timeout.ms=6000 ## group.initial.rebalance.delay.ms=0 ## -# config: - -## ConfigMap with Kafka Configuration -## NOTE: This will override config +config: "" +## @param existingConfigmap ConfigMap with Kafka Configuration +## NOTE: This will override `config` AND any KAFKA_CFG_ environment variables ## -# existingConfigmap: - -## Kafka Log4J Configuration -## An optional log4j.properties file to overwrite the default of the Kafka brokers. -## See an example log4j.properties at: -## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +existingConfigmap: "" +## @param log4j An optional log4j.properties file to overwrite the default of the Kafka brokers +## An optional log4j.properties file to overwrite the default of the Kafka brokers +## ref: https://github.com/apache/kafka/blob/trunk/config/log4j.properties ## -# log4j: - -## Kafka Log4j ConfigMap -## The name of an existing ConfigMap containing a log4j.properties file. -## NOTE: this will override log4j. +log4j: "" +## @param existingLog4jConfigMap The name of an existing ConfigMap containing a log4j.properties file +## The name of an existing ConfigMap containing a log4j.properties file +## NOTE: this will override `log4j` ## -# existingLog4jConfigMap: - -## Kafka's Java Heap size +existingLog4jConfigMap: "" +## @param heapOpts Kafka Java Heap size ## heapOpts: -Xmx1024m -Xms1024m - -## Switch to enable topic deletion or not. +## @param deleteTopicEnable Switch to enable topic deletion or not ## deleteTopicEnable: false - -## Switch to enable auto creation of topics. -## Enabling auto creation of topics not recommended for production or similar environments. +## @param autoCreateTopicsEnable Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments ## autoCreateTopicsEnable: true - -## The number of messages to accept before forcing a flush of data to disk. +## @param logFlushIntervalMessages The number of messages to accept before forcing a flush of data to disk ## -logFlushIntervalMessages: 10000 - -## The maximum amount of time a message can sit in a log before we force a flush. 
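A populated `config` is an all-or-nothing override: it replaces the generated server.properties and, per the note above, takes precedence over any KAFKA_CFG_* environment variables. A values sketch (the property values are illustrative):

config: |-
  broker.id=-1
  listeners=PLAINTEXT://:9092
  advertised.listeners=PLAINTEXT://:9092
  log.dirs=/bitnami/kafka/data
  auto.create.topics.enable=false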
+logFlushIntervalMessages: _10000 +## @param logFlushIntervalMs The maximum amount of time a message can sit in a log before we force a flush ## logFlushIntervalMs: 1000 - -## A size-based retention policy for logs. +## @param logRetentionBytes A size-based retention policy for logs ## logRetentionBytes: _1073741824 - -## The interval at which log segments are checked to see if they can be deleted. +## @param logRetentionCheckIntervalMs The interval at which log segments are checked to see if they can be deleted ## logRetentionCheckIntervalMs: 300000 - -## The minimum age of a log file to be eligible for deletion due to age. +## @param logRetentionHours The minimum age of a log file to be eligible for deletion due to age ## logRetentionHours: 168 - -## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## @param logSegmentBytes The maximum size of a log segment file. When this size is reached a new log segment will be created ## logSegmentBytes: _1073741824 - -## A comma separated list of directories under which to store log files. -## +## @param logsDirs A comma separated list of directories in which kafka's log data is kept +## ref: https://kafka.apache.org/documentation/#brokerconfigs_log.dirs logsDirs: /bitnami/kafka/data - -## The largest record batch size allowed by Kafka +## @param maxMessageBytes The largest record batch size allowed by Kafka ## maxMessageBytes: _1000012 - -## Default replication factors for automatically created topics +## @param defaultReplicationFactor Default replication factors for automatically created topics ## defaultReplicationFactor: 1 - -## The replication factor for the offsets topic +## @param offsetsTopicReplicationFactor The replication factor for the offsets topic ## offsetsTopicReplicationFactor: 1 - -## The replication factor for the transaction topic +## @param transactionStateLogReplicationFactor The replication factor for the transaction topic ## transactionStateLogReplicationFactor: 1 - -## Overridden min.insync.replicas config for the transaction topic +## @param transactionStateLogMinIsr Overridden min.insync.replicas config for the transaction topic ## transactionStateLogMinIsr: 1 - -## The number of threads doing disk I/O. +## @param numIoThreads The number of threads doing disk I/O ## numIoThreads: 8 - -## The number of threads handling network requests. +## @param numNetworkThreads The number of threads handling network requests ## numNetworkThreads: 3 - -## The default number of log partitions per topic. +## @param numPartitions The default number of log partitions per topic ## numPartitions: 1 - -## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## @param numRecoveryThreadsPerDataDir The number of threads per data directory to be used for log recovery at startup and flushing at shutdown ## numRecoveryThreadsPerDataDir: 1 - -## The receive buffer (SO_RCVBUF) used by the socket server. +## @param socketReceiveBufferBytes The receive buffer (SO_RCVBUF) used by the socket server ## socketReceiveBufferBytes: 102400 - -## The maximum size of a request that the socket server will accept (protection against OOM). +## @param socketRequestMaxBytes The maximum size of a request that the socket server will accept (protection against OOM) ## socketRequestMaxBytes: _104857600 - -## The send buffer (SO_SNDBUF) used by the socket server. 
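Each of the per-key values in this stretch maps onto a single server.properties entry, so routine tuning does not require a full `config` override. For example (the numbers are illustrative; the leading underscore follows the chart's own convention for large integers, visible in the defaults above):

deleteTopicEnable: true
logRetentionHours: 72
logRetentionBytes: _2147483648
logSegmentBytes: _536870912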
+## @param socketSendBufferBytes The send buffer (SO_SNDBUF) used by the socket server ## socketSendBufferBytes: 102400 - -## Timeout in ms for connecting to zookeeper. +## @param zookeeperConnectionTimeoutMs Timeout in ms for connecting to ZooKeeper ## zookeeperConnectionTimeoutMs: 6000 - -## Command and args for running the container. Use array form +## @param zookeeperChrootPath Path which puts data under some path in the global ZooKeeper namespace +## ref: https://kafka.apache.org/documentation/#brokerconfigs_zookeeper.connect ## -command: - - /scripts/setup.sh -args: - -## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} -## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration -## Example: -## extraEnvVars: -## - name: KAFKA_CFG_BACKGROUND_THREADS -## value: "10" +zookeeperChrootPath: "" +## @param authorizerClassName The Authorizer is configured by setting authorizer.class.name=kafka.security.authorizer.AclAuthorizer in server.properties ## -extraEnvVars: [] - -## extraVolumes and extraVolumeMounts allows you to mount other volumes -## Examples: -# extraVolumes: -# - name: kafka-jaas -# secret: -# secretName: kafka-jaas -# extraVolumeMounts: -# - name: kafka-jaas -# mountPath: /bitnami/kafka/config/kafka_jaas.conf -# subPath: kafka_jaas.conf -extraVolumes: [] -extraVolumeMounts: [] - -## Extra objects to deploy (value evaluated as a template) +authorizerClassName: "" +## @param allowEveryoneIfNoAclFound By default, if a resource has no associated ACLs, then no one is allowed to access that resource except super users ## -extraDeploy: [] - -## Authentication parameteres -## https://github.com/bitnami/bitnami-docker-kafka#security +allowEveryoneIfNoAclFound: true +## @param superUsers You can add super users in server.properties +## +superUsers: User:admin +## Authentication parameters +## https://github.com/bitnami/containers/tree/main/bitnami/kafka#security ## auth: ## Authentication protocol for client and inter-broker communications - ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' ## This table shows the security provided on each protocol: ## | Method | Authentication | Encryption via TLS | ## | plaintext | None | No | @@ -240,223 +226,413 @@ auth: ## | mtls | Yes (two-way authentication) | Yes | ## | sasl | Yes (via SASL) | No | ## | sasl_tls | Yes (via SASL) | Yes | + ## @param auth.clientProtocol Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.externalClientProtocol Authentication protocol for communications with external clients. Defaults to value of `auth.clientProtocol`. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` + ## @param auth.interBrokerProtocol Authentication protocol for inter-broker communications. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` ## clientProtocol: plaintext + # Note: empty by default for backwards compatibility reasons, find more information at + # https://github.com/bitnami/charts/pull/8902/ + externalClientProtocol: "" interBrokerProtocol: plaintext - - ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## SASL configuration ## - saslMechanisms: plain,scram-sha-256,scram-sha-512 - ## SASL mechanism for inter broker communication + sasl: + ## @param auth.sasl.mechanisms SASL mechanisms when either `auth.interBrokerProtocol`, `auth.clientProtocol` or `auth.externalClientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` + ## + mechanisms: plain,scram-sha-256,scram-sha-512 + ## @param auth.sasl.interBrokerMechanism SASL mechanism for inter broker communication. + ## + interBrokerMechanism: plain + ## JAAS configuration for SASL authentication. + ## + jaas: + ## @param auth.sasl.jaas.clientUsers Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + ## @param auth.sasl.jaas.clientPasswords Kafka client passwords. This is mandatory if more than one user is specified in clientUsers + ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + ## @param auth.sasl.jaas.interBrokerUser Kafka inter broker communication user for SASL authentication + ## + interBrokerUser: admin + ## @param auth.sasl.jaas.interBrokerPassword Kafka inter broker communication password for SASL authentication + ## + interBrokerPassword: "" + ## @param auth.sasl.jaas.zookeeperUser Kafka ZooKeeper user for SASL authentication + ## + zookeeperUser: "" + ## @param auth.sasl.jaas.zookeeperPassword Kafka ZooKeeper password for SASL authentication + ## + zookeeperPassword: "" + ## @param auth.sasl.jaas.existingSecret Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + existingSecret: "" + ## TLS configuration ## - saslInterBrokerMechanism: plain - - ## Name of the existing secret containing the truststore and - ## one keystore per Kafka broker you have in the Kafka cluster. - ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. - ## Create this secret following the steps below: - ## 1) Generate your trustore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh - ## 2) Rename your truststore to `kafka.truststore.jks`. - ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. - ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: - ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... - ## Alternatively, you can put your JKS files under the files/jks directory + tls: + ## @param auth.tls.type Format to use for TLS certificates. 
Allowed types: `jks` and `pem`
+    ##
+    type: jks
+    ## @param auth.tls.pemChainIncluded Flag to denote that the Certificate Authority (CA) certificates are bundled with the endpoint cert.
+    ## Certificates must be in proper order, where the top certificate is the leaf and the bottom certificate is the top-most intermediate CA.
+    ##
+    pemChainIncluded: false
+    ## @param auth.tls.existingSecrets Array of existing secrets containing the TLS certificates for the Kafka brokers
+    ## When using 'jks' format for certificates, each secret should contain a truststore and a keystore.
+    ## Create these secrets following the steps below:
+    ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
+    ## 2) Rename your truststore to `kafka.truststore.jks`.
+    ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker.
+    ## 4) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):
+    ##    kubectl create secret generic SECRET_NAME_0 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-0.keystore.jks
+    ##    kubectl create secret generic SECRET_NAME_1 --from-file=kafka.truststore.jks=./kafka.truststore.jks --from-file=kafka.keystore.jks=./kafka-1.keystore.jks
+    ##    ...
+    ##
+    ## When using 'pem' format for certificates, each secret should contain a public CA certificate, a public certificate and one private key.
+    ## Create these secrets following the steps below:
+    ## 1) Create a certificate key and signing request per Kafka broker, and sign the signing request with your CA
+    ## 2) Rename your CA file to `kafka.ca.crt`.
+    ## 3) Rename your certificates to `kafka-X.tls.crt` where X is the ID of each Kafka broker.
+    ## 4) Rename your keys to `kafka-X.tls.key` where X is the ID of each Kafka broker.
+    ## 5) Run the command below one time per broker to create its associated secret (SECRET_NAME_X is the name of the secret you want to create):
+    ##    kubectl create secret generic SECRET_NAME_0 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-0.tls.crt --from-file=tls.key=./kafka-0.tls.key
+    ##    kubectl create secret generic SECRET_NAME_1 --from-file=ca.crt=./kafka.ca.crt --from-file=tls.crt=./kafka-1.tls.crt --from-file=tls.key=./kafka-1.tls.key
+    ##    ...
+    ##
+    existingSecrets: []
+    ## @param auth.tls.autoGenerated Generate automatically self-signed TLS certificates for Kafka brokers. Currently only supported if `auth.tls.type` is `pem`
+    ## Note: ignored when using 'jks' format or `auth.tls.existingSecrets` is not empty
+    ##
+    autoGenerated: false
+    ## @param auth.tls.password Password to access the JKS files or PEM key when they are password-protected.
+    ## Note: ignored when using 'existingSecret'.
+    ##
+    password: ""
+    ## @param auth.tls.existingSecret Name of the secret containing the password to access the JKS files or PEM key when they are password-protected. (`key`: `password`)
+    ##
+    existingSecret: ""
+    ## @param auth.tls.jksTruststoreSecret Name of the existing secret containing your truststore, if it is not included in (or differs from) the ones in `auth.tls.existingSecrets`
+    ## Note: ignored when using 'pem' format for certificates.
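On the values side, the per-broker secrets created with the kubectl commands above are then listed in order; a sketch for a two-broker cluster using the `jks` format (the secret names are placeholders):

replicaCount: 2
auth:
  clientProtocol: tls
  interBrokerProtocol: tls
  tls:
    type: jks
    existingSecrets:
      - kafka-tls-0   # kafka.truststore.jks + kafka.keystore.jks for broker 0
      - kafka-tls-1
    password: ""      # only needed if the stores are password-protected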
+ ## + jksTruststoreSecret: "" + ## @param auth.tls.jksKeystoreSAN The secret key from the `auth.tls.existingSecrets` containing the keystore with a SAN certificate + ## The SAN certificate in it should be issued with Subject Alternative Names for all headless services: + ## - kafka-0.kafka-headless.kafka.svc.cluster.local + ## - kafka-1.kafka-headless.kafka.svc.cluster.local + ## - kafka-2.kafka-headless.kafka.svc.cluster.local + ## Note: ignored when using 'pem' format for certificates. + ## + jksKeystoreSAN: "" + ## @param auth.tls.jksTruststore The secret key from the `auth.tls.existingSecrets` or `auth.tls.jksTruststoreSecret` containing the truststore + ## Note: ignored when using 'pem' format for certificates. + ## + jksTruststore: "" + ## @param auth.tls.endpointIdentificationAlgorithm The endpoint identification algorithm to validate server hostname using server certificate + ## Disable server host name verification by setting it to an empty string. + ## ref: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + endpointIdentificationAlgorithm: https + ## Zookeeper client configuration for kafka brokers ## - # jksSecret: - - ## Password to access the JKS files when they are password-protected. - ## - # jksPassword: - - ## The endpoint identification algorithm used by clients to validate server host name. - ## Disable server host name verification by setting it to an empty string - ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings - ## - tlsEndpointIdentificationAlgorithm: https - - ## JAAS configuration for SASL authentication - ## MANDATORY when method is 'sasl', or 'sasl_tls' - ## - jaas: - ## Kafka client user list + zookeeper: + ## TLS configuration ## - ## clientUsers: - ## - user1 - ## - user2 - ## - clientUsers: - - user - - ## Kafka client passwords. This is mandatory if more than one user is specified in clientUsers. - ## - ## clientPasswords: - ## - password1 - ## - password2" - ## - clientPasswords: [] - - ## Kafka inter broker communication user - ## - interBrokerUser: admin - - ## Kafka inter broker communication password - ## - interBrokerPassword: "" - - ## Kafka Zookeeper user - ## - # zookeeperUser: - - ## Kafka Zookeeper password - ## - # zookeeperPassword: - - ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. - ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: - ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD - ## - # existingSecret: - -## The address(es) the socket server listens on. + tls: + ## @param auth.zookeeper.tls.enabled Enable TLS for Zookeeper client connections. + ## + enabled: false + ## @param auth.zookeeper.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`. + ## + type: jks + ## @param auth.zookeeper.tls.verifyHostname Hostname validation. + ## + verifyHostname: true + ## @param auth.zookeeper.tls.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications. + ## + existingSecret: "" + ## @param auth.zookeeper.tls.existingSecretKeystoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Keystore. 
+      ##
+      existingSecretKeystoreKey: zookeeper.keystore.jks
+      ## @param auth.zookeeper.tls.existingSecretTruststoreKey The secret key from the auth.zookeeper.tls.existingSecret containing the Truststore.
+      ##
+      existingSecretTruststoreKey: zookeeper.truststore.jks
+      ## @param auth.zookeeper.tls.passwordsSecret Existing secret containing Keystore and Truststore passwords.
+      ##
+      passwordsSecret: ""
+      ## @param auth.zookeeper.tls.passwordsSecretKeystoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Keystore.
+      ##
+      passwordsSecretKeystoreKey: keystore-password
+      ## @param auth.zookeeper.tls.passwordsSecretTruststoreKey The secret key from the auth.zookeeper.tls.passwordsSecret containing the password for the Truststore.
+      ##
+      passwordsSecretTruststoreKey: truststore-password
+## @param listeners The address(es) the socket server listens on. Auto-calculated when it's set to an empty array
 ## When it's set to an empty array, the listeners will be configured
-## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
+## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters)
 ##
 listeners: []
-
-## The address(es) (hostname:port) the brokers will advertise to producers and consumers.
+## @param advertisedListeners The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated when it's set to an empty array
 ## When it's set to an empty array, the advertised listeners will be configured
-## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
+## based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters)
 ##
 advertisedListeners: []
-
-## The listener->protocol mapping
-## When it's nil, the listeners will be configured
-## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters)
+## @param listenerSecurityProtocolMap The listener->protocol mapping. Auto-calculated when it's set to nil
+## When it's nil, the listeners will be configured based on the authentication protocols (auth.clientProtocol, auth.externalClientProtocol and auth.interBrokerProtocol parameters)
 ##
-# listenerSecurityProtocolMap:
-
-## Allow to use the PLAINTEXT listener.
+listenerSecurityProtocolMap: ""
+## @param allowPlaintextListener Allow to use the PLAINTEXT listener
 ##
 allowPlaintextListener: true
-
-## Name of listener used for communication between brokers.
+## @param interBrokerListenerName The listener that the brokers should communicate on
 ##
 interBrokerListenerName: INTERNAL
-
-## Number of Kafka brokers to deploy
+## @param command Override Kafka container command
 ##
-replicaCount: 2
-
-## StrategyType, can be set to RollingUpdate or OnDelete by default.
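When the auto-calculated listeners are not suitable, all of these settings can be pinned explicitly, and they must stay mutually consistent: every listener name needs an entry in the protocol map, and the inter-broker name must be one of the listeners. A single-broker PLAINTEXT sketch (the host names assume a release called `kafka` in the `default` namespace):

listeners:
  - INTERNAL://:9093
  - CLIENT://:9092
advertisedListeners:
  - INTERNAL://kafka-0.kafka-headless.default.svc.cluster.local:9093
  - CLIENT://kafka-0.kafka-headless.default.svc.cluster.local:9092
listenerSecurityProtocolMap: INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT
interBrokerListenerName: INTERNAL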
-## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +command: + - /scripts/setup.sh +## @param args Override Kafka container arguments ## -updateStrategy: RollingUpdate - -## Partition update strategy -## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +args: [] +## @param extraEnvVars Extra environment variables to add to Kafka pods +## ref: https://github.com/bitnami/containers/tree/main/bitnami/kafka#configuration +## e.g: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" ## -# rollingUpdatePartition: - -## Pod labels. Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +extraEnvVars: [] +## @param extraEnvVarsCM ConfigMap with extra environment variables ## -podLabels: {} - -## Pod annotations. Evaluated as a template -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Secret with extra environment variables ## -podAnnotations: {} +extraEnvVarsSecret: "" -## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand -## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## @section Statefulset parameters + +## @param replicaCount Number of Kafka nodes ## -priorityClassName: "" - -## Affinity for pod assignment. Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +replicaCount: 1 +## @param minBrokerId Minimal broker.id value, nodes increment their `broker.id` respectively +## Brokers increment their ID starting at this minimal value. +## E.g., with `minBrokerId=100` and 3 nodes, IDs will be 100, 101, 102 for brokers 0, 1, and 2, respectively. ## -affinity: {} - -## Node labels for pod assignment. Evaluated as a template -## ref: https://kubernetes.io/docs/user-guide/node-selection/ +minBrokerId: 0 +## @param brokerRackAssignment Set Broker Assignment for multi tenant environment Allowed values: `aws-az` +## ref: https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica ## -nodeSelector: {} - -## Tolerations for pod assignment. Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +brokerRackAssignment: "" +## @param containerPorts.client Kafka client container port +## @param containerPorts.internal Kafka inter-broker container port +## @param containerPorts.external Kafka external container port ## -tolerations: [] - -## Kafka pods' Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## -podSecurityContext: - fsGroup: 1001 - runAsUser: 1001 - -## Kafka containers' Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## Example: -## containerSecurityContext: -## capabilities: -## drop: ["NET_RAW"] -## readOnlyRootFilesystem: true -## -containerSecurityContext: {} - -## Kafka containers' resource requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## -resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. 
If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - limits: {} - # cpu: 250m - # memory: 1Gi - requests: {} - # cpu: 250m - # memory: 256Mi - -## Kafka containers' liveness and readiness probes. Evaluated as a template. -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +containerPorts: + client: 9092 + internal: 9093 + external: 9094 +## Configure extra options for Kafka containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on Kafka containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe ## livenessProbe: enabled: true initialDelaySeconds: 10 timeoutSeconds: 5 - # failureThreshold: 3 - # periodSeconds: 10 - # successThreshold: 1 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 +## @param readinessProbe.enabled Enable readinessProbe on Kafka containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## readinessProbe: enabled: true initialDelaySeconds: 5 failureThreshold: 6 timeoutSeconds: 5 - # periodSeconds: 10 - # successThreshold: 1 - -## Custom liveness/readiness probes that will override the default ones + periodSeconds: 10 + successThreshold: 1 +## @param startupProbe.enabled Enable startupProbe on Kafka containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one ## customLivenessProbe: {} -customReadinessProbe: {} - -## Pod Disruption Budget configuration -## The PDB will only be created if replicaCount is greater than 1 -## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## @param customReadinessProbe Custom readinessProbe that overrides the default one ## -pdb: - create: true - ## Min number of pods that must still be available after the eviction +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks lifecycleHooks for the Kafka container to automate configuration before or after startup +## +lifecycleHooks: {} +## Kafka resource requests and limits +## ref: 
https://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resources limits for the container
+## @param resources.requests The requested resources for the container
+##
+resources:
+  limits: {}
+  requests: {}
+## Kafka pods' Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enable security context for the pods
+## @param podSecurityContext.fsGroup Set Kafka pod's Security Context fsGroup
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+## Kafka containers' Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enable Kafka containers' Security Context
+## @param containerSecurityContext.runAsUser Set Kafka containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set Kafka containers' Security Context runAsNonRoot
+## @param containerSecurityContext.allowPrivilegeEscalation Force the child process to be run as non-privileged
+## e.g:
+##   containerSecurityContext:
+##     enabled: true
+##     capabilities:
+##       drop: ["NET_RAW"]
+##     readOnlyRootFilesystem: true
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  allowPrivilegeEscalation: false
+## @param hostAliases Kafka pods host aliases
+## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/
+##
+hostAliases: []
+## @param hostNetwork Specify if host network should be enabled for Kafka pods
+##
+hostNetwork: false
+## @param hostIPC Specify if host IPC should be enabled for Kafka pods
+##
+hostIPC: false
+## @param podLabels Extra labels for Kafka pods
+## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+##
+podLabels: {}
+## @param podAnnotations Extra annotations for Kafka pods
+## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+##
+podAnnotations: {}
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAffinityPreset: ""
+## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+##
+podAntiAffinityPreset: soft
+## Node affinity preset
+## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
+##
+nodeAffinityPreset:
+  ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard`
   ##
-  # minAvailable: 1
+  type: ""
+  ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## key: "kubernetes.io/e2e-az-name"
   ##
-  maxUnavailable: 1
-
-## Add sidecars to the pod.
-## Example:
+  key: ""
+  ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set.
+  ## E.g.
+  ## values:
+  ##   - e2e-az1
+  ##   - e2e-az2
+  ##
+  values: []
+## @param affinity Affinity for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+##
+affinity: {}
+## @param nodeSelector Node labels for pod assignment
+## Ref: https://kubernetes.io/docs/user-guide/node-selection/
+##
+nodeSelector: {}
+## @param tolerations Tolerations for pod assignment
+## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
+##
+tolerations: []
+## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template
+## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods
+##
+topologySpreadConstraints: []
+## @param terminationGracePeriodSeconds Seconds the pod needs to gracefully terminate
+## ref: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#hook-handler-execution
+##
+terminationGracePeriodSeconds: ""
+## @param podManagementPolicy StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+## @param priorityClassName Name of the existing priority class to be used by kafka pods
+## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/
+##
+priorityClassName: ""
+## @param schedulerName Name of the k8s scheduler (other than default)
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+schedulerName: ""
+## @param updateStrategy.type Kafka statefulset strategy type
+## @param updateStrategy.rollingUpdate Kafka statefulset rolling update configuration parameters
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  type: RollingUpdate
+  rollingUpdate: {}
+## @param extraVolumes Optionally specify extra list of additional volumes for the Kafka pod(s)
+## e.g:
+## extraVolumes:
+##   - name: kafka-jaas
+##     secret:
+##       secretName: kafka-jaas
+##
+extraVolumes: []
+## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka container(s)
+## extraVolumeMounts:
+##   - name: kafka-jaas
+##     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+##     subPath: kafka_jaas.conf
+##
+extraVolumeMounts: []
+## @param sidecars Add additional sidecar containers to the Kafka pod(s)
+## e.g:
 ## sidecars:
 ##   - name: your-image-name
 ##     image: your-image
@@ -465,389 +641,748 @@ pdb:
 ##     - name: portname
 ##       containerPort: 1234
 ##
-sidecars: {}
+sidecars: []
+## @param initContainers Add additional init containers to the Kafka pod(s)
+## e.g:
+## initContainers:
+##   - name: your-image-name
+##     image: your-image
+##     imagePullPolicy: Always
+##     ports:
+##       - name: portname
+##         containerPort: 1234
+##
+initContainers: []
+## Kafka Pod Disruption Budget
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+## @param pdb.create Deploy a pdb object for the Kafka pod
+## @param pdb.minAvailable Minimum number/percentage of available Kafka replicas
+## @param pdb.maxUnavailable Maximum number/percentage of
unavailable Kafka replicas +## +pdb: + create: false + minAvailable: "" + maxUnavailable: 1 + +## @section Traffic Exposure parameters ## Service parameters ## service: - ## Service type + ## @param service.type Kubernetes Service type ## type: ClusterIP - ## Kafka port for client connections + ## @param service.ports.client Kafka svc port for client connections + ## @param service.ports.internal Kafka svc port for inter-broker connections + ## @param service.ports.external Kafka svc port for external connections ## - port: 9092 - ## Kafka port for inter-broker connections - ## - internalPort: 9093 - ## Kafka port for external connections - ## - externalPort: 9094 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ports: + client: 9092 + internal: 9093 + external: 9094 + ## @param service.nodePorts.client Node port for the Kafka client connections + ## @param service.nodePorts.external Node port for the Kafka external connections + ## NOTE: choose port between <30000-32767> ## nodePorts: client: "" external: "" - ## Set the LoadBalancer service type to internal only. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ ## - # loadBalancerIP: - ## Load Balancer sources + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP Kafka service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP Kafka service Load Balancer IP + ## ref: https://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges Kafka service Load Balancer sources ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: + ## e.g: ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 + ## - 10.10.10.0/24 ## loadBalancerSourceRanges: [] - ## Provide any additional annotations which may be required. Evaluated as a template + ## @param service.externalTrafficPolicy Kafka service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for Kafka service ## annotations: {} - + ## Headless service properties + ## + headless: + ## @param service.headless.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + publishNotReadyAddresses: false + ## @param service.headless.annotations Annotations for the headless service. + ## + annotations: {} + ## @param service.headless.labels Labels for the headless service. 
+    ##
+    labels: {}
+  ## @param service.extraPorts Extra ports to expose in the Kafka service (normally used with the `sidecar` value)
+  ##
+  extraPorts: []
 ## External Access to Kafka brokers configuration
 ##
 externalAccess:
-  ## Enable Kubernetes external cluster access to Kafka brokers
+  ## @param externalAccess.enabled Enable Kubernetes external cluster access to Kafka brokers
   ##
   enabled: false
-
   ## External IPs auto-discovery configuration
   ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API
   ## Note: RBAC might be required
   ##
   autoDiscovery:
-    ## Enable external IP/ports auto-discovery
+    ## @param externalAccess.autoDiscovery.enabled Enable using an init container to auto-detect external IPs/ports by querying the K8s API
     ##
     enabled: false
    ## Bitnami Kubectl image
    ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/
+    ## @param externalAccess.autoDiscovery.image.registry Init container auto-discovery image registry
+    ## @param externalAccess.autoDiscovery.image.repository Init container auto-discovery image repository
+    ## @param externalAccess.autoDiscovery.image.tag Init container auto-discovery image tag (immutable tags are recommended)
+    ## @param externalAccess.autoDiscovery.image.digest Init container auto-discovery image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag
+    ## @param externalAccess.autoDiscovery.image.pullPolicy Init container auto-discovery image pull policy
+    ## @param externalAccess.autoDiscovery.image.pullSecrets Init container auto-discovery image pull secrets
     ##
     image:
       registry: docker.io
       repository: bitnami/kubectl
-      tag: 1.17.12-debian-10-r3
+      tag: 1.25.6-debian-11-r1
+      digest: ""
       ## Specify a imagePullPolicy
       ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
-      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+      ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images
       ##
       pullPolicy: IfNotPresent
      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-      ## Example:
+      ## e.g:
      ## pullSecrets:
      ##   - myRegistryKeySecretName
      ##
      pullSecrets: []
    ## Init Container resource requests and limits
-    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+    ## @param externalAccess.autoDiscovery.resources.limits The resources limits for the auto-discovery init container
+    ## @param externalAccess.autoDiscovery.resources.requests The requested resources for the auto-discovery init container
    ##
    resources:
-      # We usually recommend not to specify default resources and to leave this as a conscious
-      # choice for the user. This also increases chances charts run on environments with little
-      # resources, such as Minikube. If you do want to specify resources, uncomment the following
-      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
      limits: {}
-      #   cpu: 100m
-      #   memory: 128Mi
      requests: {}
-      #   cpu: 100m
-      #   memory: 128Mi
-  ## Parameters to configure K8s service(s) used to externally access Kafka brokers
-  ## A new service per broker will be created
+  ## Note: A new service per broker will be created
  ##
  service:
-    ## Service type. Allowed values: LoadBalancer or NodePort
+    ## @param externalAccess.service.type Kubernetes Service type for external access.
It can be NodePort, LoadBalancer or ClusterIP
     ##
     type: LoadBalancer
-    ## Port used when service type is LoadBalancer
+    ## @param externalAccess.service.ports.external Kafka port used for external access when service type is LoadBalancer
     ##
-    port: 9094
-    ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
-    ## Example:
+    ports:
+      external: 9094
+    ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
+    ## e.g:
     ## loadBalancerIPs:
     ##   - X.X.X.X
     ##   - Y.Y.Y.Y
     ##
     loadBalancerIPs: []
-    ## Load Balancer sources
+    ## @param externalAccess.service.loadBalancerNames Array of load balancer Names for each Kafka broker. Length must be the same as replicaCount
+    ## e.g:
+    ## loadBalancerNames:
+    ##   - broker1.external.example.com
+    ##   - broker2.external.example.com
+    ##
+    loadBalancerNames: []
+    ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each Kafka broker. Length must be the same as replicaCount
+    ## e.g:
+    ## loadBalancerAnnotations:
+    ##   - external-dns.alpha.kubernetes.io/hostname: broker1.external.example.com.
+    ##   - external-dns.alpha.kubernetes.io/hostname: broker2.external.example.com.
+    ##
+    loadBalancerAnnotations: []
+    ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer
     ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## Example:
+    ## e.g:
     ## loadBalancerSourceRanges:
     ##   - 10.10.10.0/24
     ##
     loadBalancerSourceRanges: []
-    ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount
-    ## Example:
+    ## @param externalAccess.service.nodePorts Array of node ports used for each Kafka broker. Length must be the same as replicaCount
+    ## e.g:
     ## nodePorts:
     ##   - 30001
     ##   - 30002
     ##
     nodePorts: []
-    ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners.
-    ## If not specified, the container will try to get the kubernetes node external IP
+    ## @param externalAccess.service.useHostIPs Use service host IPs to configure Kafka external listener when service type is NodePort
     ##
-    # domain: mydomain.com
-    ## Provide any additional annotations which may be required. Evaluated as a template
+    useHostIPs: false
+    ## @param externalAccess.service.usePodIPs Use the MY_POD_IP address for external access.
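Putting the external-access parameters together, the LoadBalancer and NodePort variants differ mainly in which per-broker array is filled in; either array must have one entry per replica. Two sketches with placeholder addresses, ports and domain:

externalAccess:
  enabled: true
  service:
    type: LoadBalancer
    loadBalancerIPs:
      - 203.0.113.10
      - 203.0.113.11

or, with fixed node ports and a DNS domain:

externalAccess:
  enabled: true
  service:
    type: NodePort
    nodePorts:
      - 30001
      - 30002
    domain: kafka.example.com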
+ ## + usePodIPs: false + ## @param externalAccess.service.domain Domain or external IP used to configure Kafka external listener when service type is NodePort or ClusterIP + ## NodePort: If not specified, the container will try to get the Kubernetes node external IP + ## ClusterIP: Must be specified, ingress IP or domain where TCP for external ports is configured + ## + domain: "" + ## @param externalAccess.service.publishNotReadyAddresses Indicates that any agent which deals with endpoints for this Service should disregard any indications of ready/not-ready + ## ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/ + publishNotReadyAddresses: false + ## @param externalAccess.service.labels Service labels for external access + ## + labels: {} + ## @param externalAccess.service.annotations Service annotations for external access ## annotations: {} + ## @param externalAccess.service.extraPorts Extra ports to expose in the Kafka external service + ## + extraPorts: [] +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port Kafka is + ## listening on. When true, Kafka accepts connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that carry the correct client label can reach Kafka. + ## But sometimes we want Kafka to be accessible to clients from other namespaces; in this case, we can use this + ## LabelSelector to select those namespaces. Note that the networkPolicy's namespace should also be explicitly added.
+ ## + ## e.g: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## @param networkPolicy.externalAccess.from customize the from section for External Access on tcp-external port + ## e.g: + ## - ipBlock: + ## cidr: 172.9.0.0/16 + ## except: + ## - 172.9.1.0/24 + ## + externalAccess: + from: [] + ## @param networkPolicy.egressRules.customRules [object] Custom network policy rule + ## + egressRules: + ## Additional custom egress rules + ## e.g: + ## customRules: + ## - to: + ## - namespaceSelector: + ## matchLabels: + ## label: example + customRules: [] -## Persistence paramaters +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ ## persistence: + ## @param persistence.enabled Enable Kafka data persistence using PVC, note that ZooKeeper persistence is unaffected + ## enabled: true - ## A manually managed Persistent Volume and Claim + ## @param persistence.existingClaim A manually managed Persistent Volume and Claim ## If defined, PVC must be created manually before volume will be bound ## The value is evaluated as a template ## - # existingClaim: - ## PV Storage Class + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for Kafka data volume ## If defined, storageClassName: ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. ## - # storageClass: "-" - ## PV Access Mode + storageClass: "" + ## @param persistence.accessModes Persistent Volume Access Modes ## accessModes: - ReadWriteOnce - ## PVC size + ## @param persistence.size PVC Storage Request for Kafka data volume ## size: 8Gi - ## PVC annotations + ## @param persistence.annotations Annotations for the PVC ## annotations: {} + ## @param persistence.labels Labels for the PVC + ## + labels: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for Kafka data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.mountPath Mount path of the Kafka data volume + ## + mountPath: /bitnami/kafka +## Log Persistence parameters +## +logPersistence: + ## @param logPersistence.enabled Enable Kafka logs persistence using PVC, note that ZooKeeper persistence is unaffected + ## + enabled: false + ## @param logPersistence.existingClaim A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param logPersistence.storageClass PVC Storage Class for Kafka logs volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
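+ ## e.g. (illustrative only; "standard" must exist as a StorageClass in your cluster): + ## storageClass: "standard"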
+ ## + storageClass: "" + ## @param logPersistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param logPersistence.size PVC Storage Request for Kafka logs volume + ## + size: 8Gi + ## @param logPersistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param logPersistence.selector Selector to match an existing Persistent Volume for Kafka log data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param logPersistence.mountPath Mount path of the Kafka logs volume + ## + mountPath: /opt/bitnami/kafka/logs -## Init Container paramaters -## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component -## values from the securityContext section of the component +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node ## volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## enabled: false - ## Bitnami Minideb image - ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## @param volumePermissions.image.registry Init container volume-permissions image registry + ## @param volumePermissions.image.repository Init container volume-permissions image repository + ## @param volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets ## image: registry: docker.io - repository: bitnami/minideb - tag: buster - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: Always - ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + repository: bitnami/bitnami-shell + tag: 11-debian-11-r75 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ ## Example: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] - ## Init Container resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits Init container volume-permissions resource limits + ## @param volumePermissions.resources.requests Init container volume-permissions resource requests ## resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. 
If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: {} - # cpu: 100m - # memory: 128Mi requests: {} - # cpu: 100m - # memory: 128Mi + ## Init container's Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + runAsUser: 0 -## Kafka pods ServiceAccount +## @section Other Parameters + +## ServiceAccount for Kafka ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ ## serviceAccount: - ## Specifies whether a ServiceAccount should be created + ## @param serviceAccount.create Enable creation of ServiceAccount for Kafka pods ## create: true - ## The name of the ServiceAccount to use. - ## If not set and create is true, a name is generated using the fluentd.fullname template + ## @param serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.serviceAccountName template ## - # name: - -## Role Based Access + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## Role Based Access Control ## ref: https://kubernetes.io/docs/admin/authorization/rbac/ ## rbac: - ## Specifies whether RBAC rules should be created + ## @param rbac.create Whether to create & use RBAC resources or not ## binding Kafka ServiceAccount to a role ## that allows Kafka pods querying the K8s API ## create: false +## @section Metrics parameters + ## Prometheus Exporters / Metrics ## metrics: - ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter + ## Prometheus Kafka exporter: exposes complementary metrics to JMX exporter ## kafka: + ## @param metrics.kafka.enabled Whether or not to create a standalone Kafka exporter to expose Kafka metrics + ## enabled: false - ## Bitnami Kafka exporter image ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## @param metrics.kafka.image.registry Kafka exporter image registry + ## @param metrics.kafka.image.repository Kafka exporter image repository + ## @param metrics.kafka.image.tag Kafka exporter image tag (immutable tags are recommended) + ## @param metrics.kafka.image.digest Kafka exporter image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag + ## @param metrics.kafka.image.pullPolicy Kafka exporter image pull policy + ## @param metrics.kafka.image.pullSecrets Specify docker-registry secret names as an array ## image: registry: docker.io repository: bitnami/kafka-exporter - tag: 1.2.0-debian-10-r220 + tag: 1.6.0-debian-11-r52 + digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: + ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] - ## Extra flags to be passed to Kafka exporter - ## Example: + ## @param metrics.kafka.certificatesSecret Name of the existing secret containing the optional certificate and key files + ## for Kafka exporter client authentication + ## + certificatesSecret: "" + ## @param metrics.kafka.tlsCert The secret key from the certificatesSecret if 'client-cert' key different from the default (cert-file) + ## + tlsCert: cert-file + ## @param metrics.kafka.tlsKey The secret key from the certificatesSecret if 'client-key' key different from the default (key-file) + ## + tlsKey: key-file + ## @param metrics.kafka.tlsCaSecret Name of the existing secret containing the optional ca certificate for Kafka exporter client authentication + ## + tlsCaSecret: "" + ## @param metrics.kafka.tlsCaCert The secret key from the certificatesSecret or tlsCaSecret if 'ca-cert' key different from the default (ca-file) + ## + tlsCaCert: ca-file + ## @param metrics.kafka.extraFlags Extra flags to be passed to Kafka exporter + ## e.g: ## extraFlags: ## tls.insecure-skip-tls-verify: "" ## web.telemetry-path: "/metrics" ## extraFlags: {} - - ## Name of the existing secret containing the optional certificate and key files - ## for Kafka Exporter client authentication + ## @param metrics.kafka.command Override Kafka exporter container command ## - # certificatesSecret: - - ## Prometheus Kafka Exporter' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + command: [] + ## @param metrics.kafka.args Override Kafka exporter container arguments + ## + args: [] + ## @param metrics.kafka.containerPorts.metrics Kafka exporter metrics container port + ## + containerPorts: + metrics: 9308 + ## Kafka exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.kafka.resources.limits The resources limits for the container + ## @param metrics.kafka.resources.requests The requested resources for the container ## resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
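+ ## e.g. (illustrative sizing taken from the old commented defaults; tune for your workload): + ## limits: + ## cpu: 100m + ## memory: 128Mi + ## requests: + ## cpu: 100m + ## memory: 128Mi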
limits: {} - # cpu: 100m - # memory: 128Mi requests: {} - # cpu: 100m - # memory: 128Mi - - ## Service configuration + ## Kafka exporter pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param metrics.kafka.podSecurityContext.enabled Enable security context for the pods + ## @param metrics.kafka.podSecurityContext.fsGroup Set Kafka exporter pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.kafka.containerSecurityContext.enabled Enable Kafka exporter containers' Security Context + ## @param metrics.kafka.containerSecurityContext.runAsUser Set Kafka exporter containers' Security Context runAsUser + ## @param metrics.kafka.containerSecurityContext.runAsNonRoot Set Kafka exporter containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.kafka.hostAliases Kafka exporter pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param metrics.kafka.podLabels Extra labels for Kafka exporter pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param metrics.kafka.podAnnotations Extra annotations for Kafka exporter pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param metrics.kafka.podAffinityPreset Pod affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param metrics.kafka.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param metrics.kafka.nodeAffinityPreset.type Node affinity preset type. Ignored if `metrics.kafka.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param metrics.kafka.nodeAffinityPreset.key Node label key to match. Ignored if `metrics.kafka.affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param metrics.kafka.nodeAffinityPreset.values Node label values to match. Ignored if `metrics.kafka.affinity` is set. + ## E.g.
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param metrics.kafka.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: metrics.kafka.podAffinityPreset, metrics.kafka.podAntiAffinityPreset, and metrics.kafka.nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param metrics.kafka.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param metrics.kafka.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param metrics.kafka.schedulerName Name of the k8s scheduler (other than default) for Kafka exporter + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param metrics.kafka.priorityClassName Kafka exporter pods' priorityClassName + ## + priorityClassName: "" + ## @param metrics.kafka.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param metrics.kafka.extraVolumes Optionally specify extra list of additional volumes for the Kafka exporter pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param metrics.kafka.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka exporter container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param metrics.kafka.sidecars Add additional sidecar containers to the Kafka exporter pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param metrics.kafka.initContainers Add init containers to the Kafka exporter pods + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## Kafka exporter service configuration ## service: - ## Kafka Exporter Service type + ## @param metrics.kafka.service.ports.metrics Kafka exporter metrics service port ## - type: ClusterIP - ## Kafka Exporter Prometheus port - ## - port: 9308 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: "" - ## Set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - ## Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## Set the Cluster IP to use + ports: + metrics: 9308 + ## @param metrics.kafka.service.clusterIP Static clusterIP or None for headless services ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address ## - # clusterIP: None - ## Annotations for the Kafka Exporter Prometheus metrics service + clusterIP: "" + ## @param metrics.kafka.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.kafka.service.annotations [object] Annotations for the Kafka exporter service ## annotations: prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/port: "{{ .Values.metrics.kafka.service.ports.metrics }}" prometheus.io/path: "/metrics" - - ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## Kafka exporter pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param metrics.kafka.serviceAccount.create Enable creation of ServiceAccount for Kafka exporter pods + ## + create: true + ## @param metrics.kafka.serviceAccount.name The name of the service account to use. If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the kafka.metrics.kafka.serviceAccountName template + ## + name: "" + ## @param metrics.kafka.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Prometheus JMX exporter: exposes the majority of Kafka's metrics ## jmx: + ## @param metrics.jmx.enabled Whether or not to expose JMX metrics to Prometheus + ## enabled: false - ## Bitnami JMX exporter image ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## @param metrics.jmx.image.registry JMX exporter image registry + ## @param metrics.jmx.image.repository JMX exporter image repository + ## @param metrics.jmx.image.tag JMX exporter image tag (immutable tags are recommended) + ## @param metrics.jmx.image.digest JMX exporter image digest in the way sha256:aa....
Please note this parameter, if set, will override the tag + ## @param metrics.jmx.image.pullPolicy JMX exporter image pull policy + ## @param metrics.jmx.image.pullSecrets Specify docker-registry secret names as an array ## image: registry: docker.io repository: bitnami/jmx-exporter - tag: 0.14.0-debian-10-r15 + tag: 0.17.2-debian-11-r41 + digest: "" ## Specify a imagePullPolicy ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images ## pullPolicy: IfNotPresent ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Example: + ## e.g: ## pullSecrets: ## - myRegistryKeySecretName ## pullSecrets: [] - - ## Prometheus JMX Exporter' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## Prometheus JMX exporter containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param metrics.jmx.containerSecurityContext.enabled Enable Prometheus JMX exporter containers' Security Context + ## @param metrics.jmx.containerSecurityContext.runAsUser Set Prometheus JMX exporter containers' Security Context runAsUser + ## @param metrics.jmx.containerSecurityContext.runAsNonRoot Set Prometheus JMX exporter containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param metrics.jmx.containerPorts.metrics Prometheus JMX exporter metrics container port + ## + containerPorts: + metrics: 5556 + ## Prometheus JMX exporter resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param metrics.jmx.resources.limits The resources limits for the JMX exporter container + ## @param metrics.jmx.resources.requests The requested resources for the JMX exporter container ## resources: - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. limits: {} - # cpu: 100m - # memory: 128Mi requests: {} - # cpu: 100m - # memory: 128Mi - - ## Service configuration + ## Prometheus JMX exporter service configuration ## service: - ## JMX Exporter Service type + ## @param metrics.jmx.service.ports.metrics Prometheus JMX exporter metrics service port ## - type: ClusterIP - ## JMX Exporter Prometheus port - ## - port: 5556 - ## Specify the nodePort value for the LoadBalancer and NodePort service types. - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: "" - ## Set the LoadBalancer service type to internal only. 
- ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - # loadBalancerIP: - ## Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## Example: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## Set the Cluster IP to use + ports: + metrics: 5556 + ## @param metrics.jmx.service.clusterIP Static clusterIP or None for headless services ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address ## - # clusterIP: None - ## Annotations for the JMX Exporter Prometheus metrics service + clusterIP: "" + ## @param metrics.jmx.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param metrics.jmx.service.annotations [object] Annotations for the Prometheus JMX exporter service ## annotations: prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/port: "{{ .Values.metrics.jmx.service.ports.metrics }}" prometheus.io/path: "/" - - ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted - ## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics + ## @param metrics.jmx.whitelistObjectNames Allows setting which JMX objects you want to expose via JMX stats to the JMX exporter + ## Only whitelisted values will be exposed via JMX exporter. They must also be exposed via Rules. To expose all metrics ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` ## (2) commented out above `overrideConfig`. ## @@ -857,8 +1392,7 @@ metrics: - java.lang:* - kafka.network:* - kafka.log:* - - ## Prometheus JMX exporter configuration + ## @param metrics.jmx.config [string] Configuration file for JMX exporter ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template ## ## Credits to the incubator/kafka chart for the JMX configuration. @@ -872,63 +1406,347 @@ metrics: {{- if .Values.metrics.jmx.whitelistObjectNames }} whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] {{- end }} - - ## ConfigMap with Prometheus JMX exporter configuration + ## @param metrics.jmx.existingConfigmap Name of existing ConfigMap with JMX exporter configuration ## NOTE: This will override metrics.jmx.config ## - # existingConfigmap: - + existingConfigmap: "" + ## @param metrics.jmx.extraRules Add extra rules to JMX exporter configuration + ## e.g: + ## extraRules: |- + ## - pattern: kafka.server<type=socket-server-metrics, listener=(.+), networkProcessor=(.+)><>(connection-count) + ## name: kafka_server_socket_server_metrics_$3 + ## labels: + ## listener: $1 + extraRules: "" ## Prometheus Operator ServiceMonitor configuration ## serviceMonitor: + ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) + ## enabled: false - ## Namespace in which Prometheus is running + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running ## - # namespace: monitoring - - ## Interval at which metrics should be scraped.
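+ ## e.g. (the namespace where your Prometheus is deployed): + ## namespace: monitoring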
+ namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint ## - # interval: 10s - - ## Timeout after which the scrape is ended + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint ## - # scrapeTimeout: 10s - - ## ServiceMonitor selector labels - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + scrapeTimeout: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus ## - # selector: - # prometheus: my-prometheus + labels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + prometheusRule: + ## @param metrics.prometheusRule.enabled if `true`, creates a Prometheus Operator PrometheusRule (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) + ## + enabled: false + ## @param metrics.prometheusRule.namespace Namespace in which Prometheus is running + ## + namespace: "" + ## @param metrics.prometheusRule.labels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + labels: {} + ## @param metrics.prometheusRule.groups Prometheus Rule Groups for Kafka + ## + groups: [] + +## @section Kafka provisioning parameters + +## Kafka provisioning ## -## Zookeeper chart configuration -## -## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +provisioning: + ## @param provisioning.enabled Enable kafka provisioning Job + ## + enabled: false + ## @param provisioning.numPartitions Default number of partitions for topics when unspecified + ## + numPartitions: 1 + ## @param provisioning.replicationFactor Default replication factor for topics when unspecified + ## + replicationFactor: 1 + ## @param provisioning.topics Kafka topics to provision + ## - name: topic-name + ## partitions: 1 + ## replicationFactor: 1 + ## ## https://kafka.apache.org/documentation/#topicconfigs + ## config: + ## max.message.bytes: 64000 + ## flush.messages: 1 + ## + topics: [] + ## @param provisioning.nodeSelector Node labels for pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param provisioning.tolerations Tolerations for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param provisioning.extraProvisioningCommands Extra commands to run to provision cluster resources + ## - echo "Allow user to consume from any topic" + ## - >- + ## /opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## 
--command-config $CLIENT_CONF + ## --add + ## --allow-principal User:user + ## --consumer --topic '*' + ## - "/opt/bitnami/kafka/bin/kafka-acls.sh + ## --bootstrap-server $KAFKA_SERVICE + ## --command-config $CLIENT_CONF + ## --list" + ## + extraProvisioningCommands: [] + ## @param provisioning.parallel Number of provisioning commands to run at the same time + ## + parallel: 1 + ## @param provisioning.preScript Extra bash script to run before topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + preScript: "" + ## @param provisioning.postScript Extra bash script to run after topic provisioning. $CLIENT_CONF is path to properties file with most needed configurations + ## + postScript: "" + ## Auth Configuration for kafka provisioning Job + ## + auth: + ## TLS configuration for kafka provisioning Job + ## + tls: + ## @param provisioning.auth.tls.type Format to use for TLS certificates. Allowed types: `jks` and `pem`. + ## Note: ignored if auth.tls.clientProtocol different from one of these values: "tls" "mtls" "sasl_tls". + ## + type: jks + ## @param provisioning.auth.tls.certificatesSecret Existing secret containing the TLS certificates for the Kafka provisioning Job. + ## When using 'jks' format for certificates, the secret should contain a truststore and a keystore. + ## When using 'pem' format for certificates, the secret should contain a public CA certificate, a public certificate and one private key. + ## + certificatesSecret: "" + ## @param provisioning.auth.tls.cert The secret key from the certificatesSecret if 'cert' key different from the default (tls.crt) + ## + cert: tls.crt + ## @param provisioning.auth.tls.key The secret key from the certificatesSecret if 'key' key different from the default (tls.key) + ## + key: tls.key + ## @param provisioning.auth.tls.caCert The secret key from the certificatesSecret if 'caCert' key different from the default (ca.crt) + ## + caCert: ca.crt + ## @param provisioning.auth.tls.keystore The secret key from the certificatesSecret if 'keystore' key different from the default (keystore.jks) + ## + keystore: keystore.jks + ## @param provisioning.auth.tls.truststore The secret key from the certificatesSecret if 'truststore' key different from the default (truststore.jks) + ## + truststore: truststore.jks + ## @param provisioning.auth.tls.passwordsSecret Name of the secret containing passwords to access the JKS files or PEM key when they are password-protected. + ## It should contain two keys called "keystore-password" and "truststore-password", or "key-password" if using a password-protected PEM key. + ## + passwordsSecret: "" + ## @param provisioning.auth.tls.keyPasswordSecretKey The secret key from the passwordsSecret if 'keyPasswordSecretKey' key different from the default (key-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keyPasswordSecretKey: key-password + ## @param provisioning.auth.tls.keystorePasswordSecretKey The secret key from the passwordsSecret if 'keystorePasswordSecretKey' key different from the default (keystore-password) + ## Note: must not be used if `passwordsSecret` is not defined. + ## + keystorePasswordSecretKey: keystore-password + ## @param provisioning.auth.tls.truststorePasswordSecretKey The secret key from the passwordsSecret if 'truststorePasswordSecretKey' key different from the default (truststore-password) + ## Note: must not be used if `passwordsSecret` is not defined. 
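+ ## e.g. a compatible passwords secret could be created as follows (hypothetical name and passwords): + ## kubectl create secret generic kafka-provisioning-tls-passwords \ + ## --from-literal=keystore-password=changeit \ + ## --from-literal=truststore-password=changeit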
+ ## + truststorePasswordSecretKey: truststore-password + ## @param provisioning.auth.tls.keyPassword Password to access the password-protected PEM key if necessary. Ignored if 'passwordsSecret' is provided. + ## + keyPassword: "" + ## @param provisioning.auth.tls.keystorePassword Password to access the JKS keystore. Ignored if 'passwordsSecret' is provided. + ## + keystorePassword: "" + ## @param provisioning.auth.tls.truststorePassword Password to access the JKS truststore. Ignored if 'passwordsSecret' is provided. + ## + truststorePassword: "" + ## @param provisioning.command Override provisioning container command + ## + command: [] + ## @param provisioning.args Override provisioning container arguments + ## + args: [] + ## @param provisioning.extraEnvVars Extra environment variables to add to the provisioning pod + ## e.g: + ## extraEnvVars: + ## - name: KAFKA_CFG_BACKGROUND_THREADS + ## value: "10" + ## + extraEnvVars: [] + ## @param provisioning.extraEnvVarsCM ConfigMap with extra environment variables + ## + extraEnvVarsCM: "" + ## @param provisioning.extraEnvVarsSecret Secret with extra environment variables + ## + extraEnvVarsSecret: "" + ## @param provisioning.podAnnotations Extra annotations for Kafka provisioning pods + ## + podAnnotations: {} + ## @param provisioning.podLabels Extra labels for Kafka provisioning pods + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## Kafka provisioning pods ServiceAccount + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param provisioning.serviceAccount.create Enable creation of ServiceAccount for Kafka provisioning pods + ## + create: false + ## @param provisioning.serviceAccount.name The name of the service account to use. 
If not set and `create` is `true`, a name is generated + ## If not set and create is true, a name is generated using the provisioning.serviceAccount.name template + ## + name: "" + ## @param provisioning.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: true + ## Kafka provisioning resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param provisioning.resources.limits The resources limits for the Kafka provisioning container + ## @param provisioning.resources.requests The requested resources for the Kafka provisioning container + ## + resources: + limits: {} + requests: {} + ## Kafka provisioning pods' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param provisioning.podSecurityContext.enabled Enable security context for the pods + ## @param provisioning.podSecurityContext.fsGroup Set Kafka provisioning pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + ## Kafka provisioning containers' Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param provisioning.containerSecurityContext.enabled Enable Kafka provisioning containers' Security Context + ## @param provisioning.containerSecurityContext.runAsUser Set Kafka provisioning containers' Security Context runAsUser + ## @param provisioning.containerSecurityContext.runAsNonRoot Set Kafka provisioning containers' Security Context runAsNonRoot + ## e.g: + ## containerSecurityContext: + ## enabled: true + ## capabilities: + ## drop: ["NET_RAW"] + ## readOnlyRootFilesystem: true + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + ## @param provisioning.schedulerName Name of the k8s scheduler (other than default) for Kafka provisioning + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param provisioning.extraVolumes Optionally specify extra list of additional volumes for the Kafka provisioning pod(s) + ## e.g: + ## extraVolumes: + ## - name: kafka-jaas + ## secret: + ## secretName: kafka-jaas + ## + extraVolumes: [] + ## @param provisioning.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Kafka provisioning container(s) + ## extraVolumeMounts: + ## - name: kafka-jaas + ## mountPath: /bitnami/kafka/config/kafka_jaas.conf + ## subPath: kafka_jaas.conf + ## + extraVolumeMounts: [] + ## @param provisioning.sidecars Add additional sidecar containers to the Kafka provisioning pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param provisioning.initContainers Add additional init containers to the Kafka provisioning pod(s) + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + initContainers: [] + ## @param provisioning.waitForKafka If true, use an init container to wait until Kafka is ready before starting provisioning + ## + waitForKafka: true + +## @section ZooKeeper chart
parameters + +## ZooKeeper chart configuration +## https://github.com/bitnami/charts/blob/main/bitnami/zookeeper/values.yaml ## zookeeper: + ## @param zookeeper.enabled Switch to enable or disable the ZooKeeper helm chart + ## enabled: true + ## @param zookeeper.replicaCount Number of ZooKeeper nodes + ## + replicaCount: 1 + ## ZooKeeper authentication + ## auth: - ## Enable Zookeeper auth - ## - enabled: false - ## User that will use Zookeeper clients to auth - ## - # clientUser: - ## Password that will use Zookeeper clients to auth - ## - # clientPassword: - ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" - ## - # serverUsers: - ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" - ## - # serverPasswords: + client: + ## @param zookeeper.auth.client.enabled Enable ZooKeeper auth + ## + enabled: false + ## @param zookeeper.auth.client.clientUser User that ZooKeeper clients will use to authenticate + ## + clientUser: "" + ## @param zookeeper.auth.client.clientPassword Password that ZooKeeper clients will use to authenticate + ## + clientPassword: "" + ## @param zookeeper.auth.client.serverUsers Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param zookeeper.auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## ZooKeeper Persistence parameters + ## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ + ## @param zookeeper.persistence.enabled Enable persistence on ZooKeeper using PVC(s) + ## @param zookeeper.persistence.storageClass Persistent Volume storage class + ## @param zookeeper.persistence.accessModes Persistent Volume access modes + ## @param zookeeper.persistence.size Persistent Volume size + ## + persistence: + enabled: true + storageClass: "" + accessModes: + - ReadWriteOnce + size: 8Gi -## This value is only used when zookeeper.enabled is set to false +## External ZooKeeper Configuration +## All of these values are only used if `zookeeper.enabled=false` ## externalZookeeper: - ## Server or list of external zookeeper servers to use. + ## @param externalZookeeper.servers List of external ZooKeeper servers to use. Typically used in combination with 'zookeeperChrootPath'. ## servers: []
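+ ## e.g. (hypothetical host names, shown for illustration): + ## servers: + ## - zk-0.zookeeper-headless.default.svc.cluster.local + ## - zk-1.zookeeper-headless.default.svc.cluster.local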