Merge pull request #232 from openreplay/dev

v1.3.6ee data fixes
commit bd3d59dfb3
Mehdi Osman, 2021-12-04 16:12:29 +01:00, committed by GitHub
6 changed files with 57 additions and 49 deletions

View file

@@ -1,13 +1,13 @@
 BEGIN;
-CREATE INDEX sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL;
-CREATE INDEX sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0;
-CREATE INDEX pages_base_path_base_pathLNGT2_idx ON events.pages (base_path) WHERE length(base_path) > 2;
+CREATE INDEX IF NOT EXISTS sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL;
+CREATE INDEX IF NOT EXISTS sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0;
+CREATE INDEX IF NOT EXISTS pages_base_path_base_pathLNGT2_idx ON events.pages (base_path) WHERE length(base_path) > 2;
-CREATE INDEX users_tenant_id_deleted_at_N_idx ON users (tenant_id) WHERE deleted_at ISNULL;
-CREATE INDEX issues_issue_id_timestamp_idx ON events_common.issues (issue_id, timestamp);
-CREATE INDEX issues_timestamp_idx ON events_common.issues (timestamp);
-CREATE INDEX issues_project_id_issue_id_idx ON public.issues (project_id, issue_id);
+CREATE INDEX IF NOT EXISTS users_tenant_id_deleted_at_N_idx ON users (tenant_id) WHERE deleted_at ISNULL;
+CREATE INDEX IF NOT EXISTS issues_issue_id_timestamp_idx ON events_common.issues (issue_id, timestamp);
+CREATE INDEX IF NOT EXISTS issues_timestamp_idx ON events_common.issues (timestamp);
+CREATE INDEX IF NOT EXISTS issues_project_id_issue_id_idx ON public.issues (project_id, issue_id);
 CREATE TABLE roles
 (
@@ -69,4 +69,4 @@ ALTER TABLE public.users
 ALTER TABLE public.users
     ALTER COLUMN origin TYPE text;
 DROP TYPE IF EXISTS user_origin;
-COMMIT;
+COMMIT;
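
Note on the hunks above: adding IF NOT EXISTS makes the index creation idempotent. Every statement runs inside the single BEGIN/COMMIT, so on a re-run one duplicate-index error would otherwise abort the whole migration. A minimal sketch of the behavior, assuming a reachable Postgres and a hypothetical $POSTGRES_DSN connection string:

psql "$POSTGRES_DSN" <<'SQL'
BEGIN;
-- Safe to run twice: the second run is a no-op instead of
-- "ERROR: relation ... already exists" aborting the transaction.
CREATE INDEX IF NOT EXISTS sessions_user_id_useridNN_idx
    ON sessions (user_id) WHERE user_id IS NOT NULL;
COMMIT;
SQL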

View file

@@ -1,16 +1,16 @@
 BEGIN;
-CREATE INDEX sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL;
-CREATE INDEX sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0;
-CREATE INDEX pages_base_path_base_pathLNGT2_idx ON events.pages (base_path) WHERE length(base_path) > 2;
+CREATE INDEX IF NOT EXISTS sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL;
+CREATE INDEX IF NOT EXISTS sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0;
+CREATE INDEX IF NOT EXISTS pages_base_path_base_pathLNGT2_idx ON events.pages (base_path) WHERE length(base_path) > 2;
-CREATE INDEX clicks_session_id_timestamp_idx ON events.clicks (session_id, timestamp);
-CREATE INDEX errors_error_id_idx ON errors (error_id);
-CREATE INDEX errors_error_id_idx ON events.errors (error_id);
+CREATE INDEX IF NOT EXISTS clicks_session_id_timestamp_idx ON events.clicks (session_id, timestamp);
+CREATE INDEX IF NOT EXISTS errors_error_id_idx ON errors (error_id);
+CREATE INDEX IF NOT EXISTS errors_error_id_idx ON events.errors (error_id);
-CREATE INDEX issues_issue_id_timestamp_idx ON events_common.issues(issue_id,timestamp);
-CREATE INDEX issues_timestamp_idx ON events_common.issues (timestamp);
-CREATE INDEX issues_project_id_issue_id_idx ON public.issues (project_id, issue_id);
+CREATE INDEX IF NOT EXISTS issues_issue_id_timestamp_idx ON events_common.issues(issue_id,timestamp);
+CREATE INDEX IF NOT EXISTS issues_timestamp_idx ON events_common.issues (timestamp);
+CREATE INDEX IF NOT EXISTS issues_project_id_issue_id_idx ON public.issues (project_id, issue_id);
-COMMIT;
+COMMIT;

View file

@@ -1 +0,0 @@

View file

@@ -30,18 +30,17 @@
     db_path: []
 - name: generating migration db paths
   set_fact:
-    db_path: "{{dst_list | default([])}} + [ '{{ item[0] }}/*.sql' ]"
+    db_path: "{{ db_path }} + [ '{{playbook_dir}}/db/init_dbs/clickhouse/{{ item }}/*.sql' ]"
   with_items: "{{ migration_versions.split(',') }}"
 - name: Restoring clickhouse data
   shell: |
     file="{{ item|basename }}"
     kubectl exec -n db clickhouse-0 -- /bin/bash -c "rm -rf /tmp/$file"
-    kubectl cp -n db $file clickhouse-0:/tmp/
+    kubectl cp -n db {{ item }} clickhouse-0:/tmp/
     kubectl exec -n db clickhouse-0 -- /bin/bash -c "clickhouse-client < /tmp/$file" 2>&1 | tee -a "{{ playbook_dir }}"/clickhouse_init.log
   args:
-    chdir: db/init_dbs/clickhouse/create
-  with_fileglob:
-    - "{{ db_path }}"
+    chdir: db/init_dbs/clickhouse/
+  with_fileglob: "{{ db_path }}"
   retries: 3
   delay: 60
   register: result
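
A rough shell equivalent of the reworked tasks above: db_path now accumulates one *.sql glob per requested version, and each matching file is copied into the clickhouse-0 pod and piped to clickhouse-client. The version list "1.3.6" below is an illustrative value normally supplied by upgrade.sh:

migration_versions="1.3.6"   # illustrative; passed in as a comma-separated list
for version in ${migration_versions//,/ }; do
  for sql in db/init_dbs/clickhouse/"$version"/*.sql; do
    file=$(basename "$sql")
    kubectl exec -n db clickhouse-0 -- /bin/bash -c "rm -rf /tmp/$file"
    kubectl cp -n db "$sql" clickhouse-0:/tmp/
    kubectl exec -n db clickhouse-0 -- /bin/bash -c "clickhouse-client < /tmp/$file"
  done
done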

View file

@@ -2,6 +2,8 @@
 # upgrade.sh v1.10
 set -e
+cwd=$PWD
 openreplay_old_dir=$1
 vars_file_path=${openreplay_old_dir}/scripts/helm/vars.yaml
@@ -35,7 +37,7 @@ migration(){
     # Ref: https://stackoverflow.com/questions/1527049/how-can-i-join-elements-of-an-array-in-bash
     # Creating an array of versions to migrate.
     db=$1
-    migration_versions=(`ls -l db/init_dbs/$db | grep -E ^d | awk -v number=${old_version} '$NF > number {print $NF}'`)
+    migration_versions=(`ls -l db/init_dbs/$db | grep -E ^d | awk -v number=${old_version} '$NF > number {print $NF}' | grep -v create`)
     # Can't pass the space separated array to ansible for migration. So joining them with ,
     joined_migration_versions=$(IFS=, ; echo "${migration_versions[*]}")
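
The only change in this hunk is the trailing grep -v create. awk's $NF > number is a string comparison here, and "create" sorts after any "1.x.y", so the fresh-install create/ directory would otherwise be picked up as a migration version. A sketch with hypothetical directory names:

old_version="1.3.5"
# Suppose db/init_dbs/postgresql contains the directories: create 1.3.5 1.3.6
ls -l db/init_dbs/postgresql | grep -E ^d | awk -v number=${old_version} '$NF > number {print $NF}'
# -> 1.3.6
# -> create        ("c" > "1" in the string comparison)
ls -l db/init_dbs/postgresql | grep -E ^d | awk -v number=${old_version} '$NF > number {print $NF}' | grep -v create
# -> 1.3.6
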
@@ -65,8 +67,11 @@ patch(){
     )
     for var in ${vars[@]};do
         # Get old value
-        old_val=`grep $var ${openreplay_old_dir}/scripts/helm/app/chalice.yaml|xargs`
-        sed -i "s/${var}.*/$old_val/g" app/chalice.yaml
+        old_val=`grep $var ${openreplay_old_dir}/scripts/helm/app/chalice.yaml| cut -d" " -f4|xargs`
+        # Converting caps env var to small ansible variable.
+        # In chalice EMAIL_HOST translates to email_host in vars.yaml
+        # Ref: https://stackoverflow.com/questions/2264428/how-to-convert-a-string-to-lower-case-in-bash
+        sed -i "s#${var,,}.*#${var,,}: \"$old_val\"#g" vars.yaml
     done
 }
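
The rewritten loop patches vars.yaml instead of the rendered chalice.yaml, using bash 4+'s ${var,,} lowercase expansion to map the chalice env-var name to its vars.yaml key (EMAIL_HOST -> email_host). A sketch of one iteration, assuming a hypothetical chalice.yaml line whose value sits in the fourth space-separated field:

var="EMAIL_HOST"                    # one entry of the vars array
# old/chalice.yaml and its field layout are illustrative assumptions.
old_val=$(grep "$var" old/chalice.yaml | cut -d" " -f4 | xargs)
sed -i "s#${var,,}.*#${var,,}: \"$old_val\"#g" vars.yaml   # -> email_host: "<old value>"
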
@@ -74,6 +79,11 @@ patch(){
 patch
 installation_type=1
+if [[ ${ENTERPRISE} -eq 1 ]]; then
+    cp -rf ../../ee/scripts/helm/db/* db/
+    echo -e "Migrating clickhouse"
+    migration clickhouse
+fi
 echo -e "Migrating postgresql"
 migration postgresql
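
With this hunk, an enterprise upgrade first overlays the EE database scripts and migrates clickhouse, then falls through to the postgresql migration shared with the community edition. A hypothetical invocation, assuming ENTERPRISE is taken from the environment and the previous install lives at ~/openreplay-old:

ENTERPRISE=1 bash upgrade.sh ~/openreplay-old
# EE path: cp ee/scripts/helm/db/* -> db/, migration clickhouse, then migration postgresql
# CE path: migration postgresql only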

View file

@@ -88,25 +88,25 @@ db_resource_override:
   clickhouse: {{ db_resource_override.clickhouse|default({}) }}
 ## Sane defaults
-s3_endpoint: "{{ s3_endpoint }}"
-aws_region: "{{ aws_region }}"
-assets_bucket: "{{ assets_bucket }}"
-recordings_bucket: "{{ recordings_bucket }}"
-sourcemaps_bucket: "{{ sourcemaps_bucket }}"
-kafka_endpoint: "{{ kafka_endpoint }}"
-kafka_ssl: "{{ kafka_ssl }}"
-postgres_endpoint: "{{ postgres_endpoint }}"
-postgres_port: "{{ postgres_port }}"
-postgres_db_name: "{{ postgres_db_name }}"
-postgres_db_user: "{{ postgres_db_user }}"
-postgres_db_password: "{{ postgres_db_password }}"
-redis_endpoint: "{{ redis_endpoint }}"
-email_host: "{{ email_host }}"
-email_port: "{{ email_port }}"
-email_user: "{{ email_user }}"
-email_password: "{{ email_password }}"
-email_use_tls: "{{ email_use_tls }}"
-email_use_ssl: "{{ email_use_ssl }}"
-email_ssl_key: "{{ email_ssl_key }}"
-email_ssl_cert: "{{ email_ssl_cert }}"
-email_from: "{{ email_from }}"
+s3_endpoint: "http://minio.db.svc.cluster.local:9000"
+aws_region: "us-east-1"
+assets_bucket: sessions-assets
+recordings_bucket: mobs
+sourcemaps_bucket: sourcemaps
+kafka_endpoint: kafka.db.svc.cluster.local:9042
+kafka_ssl: 'false'
+postgres_endpoint: postgresql.db.svc.cluster.local
+postgres_port: 5432
+postgres_db_name: postgres
+postgres_db_user: postgres
+postgres_db_password: asayerPostgres
+redis_endpoint: redis-master.db.svc.cluster.local:6379
+email_host: ''
+email_port: '587'
+email_user: ''
+email_password: ''
+email_use_tls: 'true'
+email_use_ssl: 'false'
+email_ssl_key: ''
+email_ssl_cert: ''
+email_from: OpenReplay<do-not-reply@openreplay.com>