removing duplicate helm chart
parent 975333c084
commit eae1d6b763
11 changed files with 0 additions and 546 deletions
@@ -1,23 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

@@ -1,24 +0,0 @@
apiVersion: v2
name: database-migrate
description: A Helm chart for Kubernetes

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.3.6"

@@ -1,38 +0,0 @@
#!/bin/bash

set -e

clickhousedir=/opt/openreplay/openreplay/scripts/helm/db/init_dbs/clickhouse

function migrate() {
    echo "Starting clickhouse migration"
    migration_versions=$1
    for version in $migration_versions; do
        echo "Migrating clickhouse version $version"
        # For now, we can ignore the clickhouse db inject errors.
        # TODO: Better error handling in script
        clickhouse-client -h clickhouse.db.svc.cluster.local --port 9000 < ${clickhousedir}/${version}/${version}.sql || true
    done
}

function init() {
    echo "Initializing clickhouse"
    for file in `ls ${clickhousedir}/create/*.sql`; do
        echo "Injecting $file"
        clickhouse-client -h clickhouse.db.svc.cluster.local --port 9000 < $file
    done
}

# /bin/bash clickhouse.sh migrate $migration_versions
case "$1" in
    migrate)
        migrate $2
        ;;
    init)
        init
        ;;
    *)
        echo "Unknown operation for clickhouse migration; exiting."
        exit 1
        ;;
esac

@@ -1,76 +0,0 @@
#!/bin/bash

cd $(dirname $0)

function migration() {
    ls -la /opt/openreplay/openreplay
    db=$1

    # Checking if previous app version is set.
    if [[ $PREVIOUS_APP_VERSION == "" ]]; then
        echo "Previous app version to be migrated is not set. Rerun using --set fromVersion=v1.3.5"
        exit 100
    fi

    if [[ $PREVIOUS_APP_VERSION == $CHART_APP_VERSION ]]; then
        echo "No application version change. Not upgrading."
        exit 0
    fi

    # Checking migration versions
    cd /opt/openreplay/openreplay/scripts/helm
    migration_versions=(`ls -l db/init_dbs/$db | grep -E ^d | awk -v number=${PREVIOUS_APP_VERSION} '$NF > number {print $NF}' | grep -v create`)
    echo "Migration version: $migration_versions"

    cd -

    case "$1" in
        postgresql)
            /bin/bash postgresql.sh migrate $migration_versions
            ;;
        clickhouse)
            /bin/bash clickhouse.sh migrate $migration_versions
            ;;
        kafka)
            /bin/bash kafka.sh migrate $migration_versions
            ;;
        *)
            echo "Unknown operation for db migration; exiting."
            exit 1
            ;;
    esac
}

function init(){
    case $1 in
        postgresql)
            /bin/bash postgresql.sh init
            ;;
        clickhouse)
            /bin/bash clickhouse.sh init
            ;;
        kafka)
            /bin/bash kafka.sh init
            ;;
        *)
            echo "Unknown operation for db init; exiting."
            exit 1
            ;;
    esac
}

# dbops.sh true(upgrade) clickhouse
case "$1" in
    "false")
        init $2
        ;;
    "true")
        migration $2
        ;;
    *)
        echo "Unknown operation for db migration; exiting."
        exit 1
        ;;
esac

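For orientation, the dbops.sh script above is the single entry point the migration Job invokes: the first argument is the rendered value of {{ .Release.IsUpgrade }} and the second is the database name, as the '# dbops.sh true(upgrade) clickhouse' comment hints. A minimal invocation sketch, assuming the scripts sit side by side as they do when mounted from the ConfigMap; the version values below are purely illustrative:

# Fresh install: Helm renders .Release.IsUpgrade as "false", so init() runs.
/bin/bash dbops.sh false postgresql

# Upgrade: .Release.IsUpgrade renders as "true"; in the Job these env vars come from the pod spec.
PREVIOUS_APP_VERSION=v1.3.5 CHART_APP_VERSION=v1.3.6 /bin/bash dbops.sh true postgresql
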
@@ -1,41 +0,0 @@
#!/bin/bash

set -e

topics=(
    "raw"
    "raw-ios"
    "trigger"
    "cache"
    "analytics"
)

touch /tmp/config.txt

if [[ $KAFKA_SSL == "true" ]]; then
    echo 'security.protocol=SSL' > /tmp/config.txt
fi

function init() {
    echo "Initializing kafka"
    for topic in ${topics[*]}; do
        echo "Creating topic: $topic"
        # TODO: Have to check an idempotent way of creating topics.
        kafka-topics.sh --create --bootstrap-server ${KAFKA_HOST}:${KAFKA_PORT} --replication-factor 2 --partitions 16 --topic ${topic} --command-config /tmp/config.txt || true
        kafka-configs.sh --bootstrap-server ${KAFKA_HOST}:${KAFKA_PORT} --entity-type topics --alter --add-config retention.ms=3456000000 --entity-name=${topic} --command-config /tmp/config.txt || true
    done
}

# /bin/bash kafka.sh migrate $migration_versions
case "$1" in
    migrate)
        init
        ;;
    init)
        init
        ;;
    *)
        echo "Unknown operation for kafka migration; exiting."
        exit 1
        ;;
esac

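One note on the retention value used above: retention.ms is expressed in milliseconds, and 3456000000 ms works out to exactly 40 days. A quick shell check, plain arithmetic and nothing chart-specific:

# milliseconds -> days: divide by 1000 (s), 60 (min), 60 (h), 24 (d)
echo $(( 3456000000 / 1000 / 60 / 60 / 24 ))   # prints 40
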
@@ -1,41 +0,0 @@
#!/bin/bash

set -e

pgdir=/opt/openreplay/openreplay/scripts/helm/db/init_dbs/postgresql

# ENV variables
# Ref: https://www.postgresql.org/docs/current/libpq-envars.html
# $PGHOST
# $PGPORT
# $PGDATABASE
# $PGUSER
# $PGPASSWORD

function migrate() {
    echo "Starting postgresql migration"
    migration_versions=$1
    for version in $migration_versions; do
        echo "Migrating postgresql version $version"
        psql -f ${pgdir}/${version}/${version}.sql
    done
}

function init() {
    echo "Initializing postgresql"
    psql -f ${pgdir}/init_schema.sql
}

# /bin/bash postgresql.sh migrate $migration_versions
case "$1" in
    migrate)
        migrate $2
        ;;
    init)
        init
        ;;
    *)
        echo "Unknown operation for postgresql migration; exiting."
        exit 1
        ;;
esac

@@ -1,123 +0,0 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: db-migration-script
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "-6" # Higher precedence, so the ConfigMap gets created first.
data:
{{- (.Files.Glob "files/*").AsConfig | nindent 2 }}
---
apiVersion: batch/v1
kind: Job
metadata:
  name: databases-migrate
  labels:
    app: databases
  annotations:
    "helm.sh/hook": pre-install, pre-upgrade
    "helm.sh/hook-weight": "-5"
spec:
  backoffLimit: 0 # Don't restart failing containers
  template:
    metadata:
      name: postgresqlMigrate
    spec:
      initContainers:
        - name: git
          image: alpine/git
          env:
            - name: ENTERPRISE_EDITION_LICENSE
              value: "{{ .Values.global.enterpriseEditionLicense }}"
          command:
            - /bin/sh
            - -c
          args:
            - |
              git clone https://github.com/openreplay/openreplay -b {{ .Chart.AppVersion }} /opt/openreplay/openreplay
              if [ "$ENTERPRISE_EDITION_LICENSE" != "" ]; then
                cd /opt/openreplay/openreplay
                cp -rf ee/scripts/* scripts/
              fi
          volumeMounts:
            - name: shared
              mountPath: /opt/openreplay
      containers:
        - name: postgres
          env:
            - name: PREVIOUS_APP_VERSION
              value: "{{ .Values.fromVersion }}"
            - name: CHART_APP_VERSION
              value: "{{ .Chart.AppVersion }}"
            - name: PGHOST
              value: "{{ .Values.postgresql.postgresqlHost }}"
            - name: PGPORT
              value: "{{ .Values.postgresql.postgresqlPort }}"
            - name: PGDATABASE
              value: "{{ .Values.postgresql.postgresqlDatabase }}"
            - name: PGUSER
              value: "{{ .Values.postgresql.postgresqlUser }}"
            - name: PGPASSWORD
              value: "{{ .Values.postgresql.postgresqlPassword }}"
          image: bitnami/postgresql:13.3.0-debian-10-r53
          command:
            - /bin/bash
            - /opt/migrations/dbops.sh
            - "{{ .Release.IsUpgrade }}"
            - "postgresql"
          volumeMounts:
            - name: shared
              mountPath: /opt/openreplay
            - name: dbmigrationscript
              mountPath: /opt/migrations/
        {{- if .Values.global.enterpriseEditionLicense }}
        # Enterprise migration
        - name: clickhouse
          image: yandex/clickhouse-client:20.9
          env:
            - name: PREVIOUS_APP_VERSION
              value: "{{ .Values.fromVersion }}"
            - name: CHART_APP_VERSION
              value: "{{ .Chart.AppVersion }}"
          command:
            - /bin/bash
            - /opt/migrations/dbops.sh
            - "{{ .Release.IsUpgrade }}"
            - clickhouse
          volumeMounts:
            - name: shared
              mountPath: /opt/openreplay
            - name: dbmigrationscript
              mountPath: /opt/migrations/
        - name: kafka
          image: bitnami/kafka:2.6.0-debian-10-r30
          env:
            - name: KAFKA_HOST
              value: "{{ .Values.kafka.kafkaHost }}"
            - name: KAFKA_PORT
              value: "{{ .Values.kafka.kafkaPort }}"
            - name: KAFKA_SSL
              value: "{{ .Values.kafka.kafkaUseSsl }}"
            - name: PREVIOUS_APP_VERSION
              value: "{{ .Values.fromVersion }}"
            - name: CHART_APP_VERSION
              value: "{{ .Chart.AppVersion }}"
          command:
            - /bin/bash
            - /opt/migrations/dbops.sh
            - "{{ .Release.IsUpgrade }}"
            - kafka
          volumeMounts:
            - name: shared
              mountPath: /opt/openreplay
            - name: dbmigrationscript
              mountPath: /opt/migrations/
        {{- end}}
      volumes:
        - name: dbmigrationscript
          configMap:
            name: db-migration-script
        - name: shared
          emptyDir: {}
      restartPolicy: Never

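Because the Job above reads .Values.fromVersion and .Release.IsUpgrade, an upgrade of whichever chart keeps this hook has to pass the previously deployed app version, as the dbops.sh error message suggests. A hedged sketch of such an upgrade; the release name and chart path are placeholders, not taken from this commit:

# Hypothetical upgrade: fromVersion tells the migration scripts which
# db/init_dbs/<db>/<version> folders to apply.
helm upgrade my-release ./database-migrate --set fromVersion=v1.3.5
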
@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
  {{- range .paths }}
  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
  {{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "database-migrate.fullname" . }})
  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
  echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
        You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "database-migrate.fullname" . }}'
  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "database-migrate.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
  echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "database-migrate.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
  echo "Visit http://127.0.0.1:8080 to use your application"
  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "database-migrate.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "database-migrate.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "database-migrate.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels
*/}}
{{- define "database-migrate.labels" -}}
helm.sh/chart: {{ include "database-migrate.chart" . }}
{{ include "database-migrate.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels
*/}}
{{- define "database-migrate.selectorLabels" -}}
app.kubernetes.io/name: {{ include "database-migrate.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use
*/}}
{{- define "database-migrate.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "database-migrate.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ include "database-migrate.serviceAccountName" . }}
  labels:
    {{- include "database-migrate.labels" . | nindent 4 }}
  {{- with .Values.serviceAccount.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
{{- end }}

@@ -1,84 +0,0 @@
# Default values for database-migrate.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: nginx
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""

podAnnotations: {}

podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #   - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

service:
  type: ClusterIP
  port: 80

ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local

resources: {}
  # We usually recommend not to specify default resources and to leave this as a conscious
  # choice for the user. This also increases chances charts run on environments with little
  # resources, such as Minikube. If you do want to specify resources, uncomment the following
  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

autoscaling:
  enabled: false
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

affinity: {}

fromVersion: ""