openreplay/backend/pkg/db/postgres/bulks.go
Alexander 4b8f3bee25
Sessions refactoring (#1371)
* feat(backend): moved sql requests related to sessions table to one place

* feat(backend): refactoring in db.Saver handler

* feat(backend): huge refactoring in db/postgres module

* fix(backend): workable feature flags

* fix(backend): workable integrations

* fix(backend): workable sessions and projects modules

* fix(backend): added missed projects module to sessions

* feat(backend): renaming

* feat(backend): moved session struct to sessions module and split methods into interface, cache and storage levels

* feat(backend): moved project struct to projects module

* feat(backend): added projects model

* feat(backend): implemented new in memory cache for sessions and projects

* feat(backend): implemented new cache in projects

* feat(backend): there are 2 methods in cache module now: Get() and GetAndRefresh()

* feat(backend): added cache update operations

* fix(backend): fixed import cycle

* fix(backend): fixed panic in db message handler

* fix(backend): fixed panic in projects module

* fix(backend): fixed panic in sessions.GetDuration

* feat(backend): added direct call to get session duration if session is already in cache

* feat(backend): used pg pool everywhere except db service

* fix(backend): added missing part after rebase

* fix(backend): removed old sessions file

* feat(backend): added refactored redis client with produce/consume options

* feat(backend): added cache layer for projects

* fix(backend): added missing redis config

* fix(backend): added missing method for producer

* feat(backend): cache integration for sessions

* feat(backend): temporary method to get session directly from db

* feat(backend): adapt EE version of message handler

* fix(backend): fixed issue in fts realisation

* fix(backend): added redis cache to sessions module

* fix(backend): set 0 duration or hesitation time for inputs without focus event

* feat(backend): added cache for session updates and failover mechanism for batch.Insert() operation

* feat(backend): debug log

* feat(backend): more debug log

* feat(backend): removed debug log

* fix(backend): fixed an issue of tracking input events with empty label

* fix(backend): disabled debug log in projects cache

* fix(backend): renamed session updater

* fix(backend): fixed closed pool issue in DB service

* fix(backend): fixed dead lock in db Stop() method

* fix(backend): fixed panic in heuristics service

* feat(backend): enabled redis cache in projects

* feat(backend): clear cache on each update operation

* feat(backend): fully integrated cache layer with auto switch

* feat(backend): small refactoring in session updates

* fix(backend): fixed wrong events counter issue

* feat(backend): enabled full cache support in ender and http services

* fix(backend/ee): added missed import

* feat(backend): added second cache layer for db to speed up the service

* feat(backend): disable redis cache

* feat(backend): moved redis cache to ee
2023-07-06 10:55:43 +02:00

269 lines
7.7 KiB
Go

package postgres
import (
"log"
"openreplay/backend/pkg/db/postgres/pool"
)
// bulksTask is a single unit of work handed to the background worker:
// the snapshot of filled bulks that should be flushed to the database.
type bulksTask struct {
bulks []Bulk
}
// NewBulksTask allocates an empty task, pre-sized for the 15 bulks a
// BulkSet produces per flush.
func NewBulksTask() *bulksTask {
	task := new(bulksTask)
	task.bulks = make([]Bulk, 0, 15)
	return task
}
// BulkSet owns one bulk inserter per target table and a background worker
// that flushes snapshots of them asynchronously.
type BulkSet struct {
c pool.Pool
// One Bulk per destination table; recreated after every Send().
autocompletes Bulk
requests Bulk
customEvents Bulk
webPageEvents Bulk
webInputEvents Bulk
webInputDurations Bulk
webGraphQL Bulk
webErrors Bulk
webErrorEvents Bulk
webErrorTags Bulk
webIssues Bulk
webIssueEvents Bulk
webCustomEvents Bulk
webClickEvents Bulk
webNetworkRequest Bulk
workerTask chan *bulksTask // buffered (size 1) queue of flush tasks for the worker
done chan struct{} // signals the worker to drain and exit
finished chan struct{} // worker's acknowledgement that it has exited
}
// NewBulkSet creates a BulkSet on top of the given connection pool,
// initializes all per-table bulks and starts the background flush worker.
func NewBulkSet(c pool.Pool) *BulkSet {
	set := &BulkSet{c: c}
	set.workerTask = make(chan *bulksTask, 1)
	set.done = make(chan struct{})
	set.finished = make(chan struct{})
	set.initBulks()
	go set.worker()
	return set
}
// Get returns the bulk registered under the given name, or nil when the
// name is unknown.
func (conn *BulkSet) Get(name string) Bulk {
	// Missing keys yield the zero value (nil), matching the original
	// switch's default branch.
	byName := map[string]Bulk{
		"autocompletes":     conn.autocompletes,
		"requests":          conn.requests,
		"customEvents":      conn.customEvents,
		"webPageEvents":     conn.webPageEvents,
		"webInputEvents":    conn.webInputEvents,
		"webInputDurations": conn.webInputDurations,
		"webGraphQL":        conn.webGraphQL,
		"webErrors":         conn.webErrors,
		"webErrorEvents":    conn.webErrorEvents,
		"webErrorTags":      conn.webErrorTags,
		"webIssues":         conn.webIssues,
		"webIssueEvents":    conn.webIssueEvents,
		"webCustomEvents":   conn.webCustomEvents,
		"webClickEvents":    conn.webClickEvents,
		"webNetworkRequest": conn.webNetworkRequest,
	}
	return byName[name]
}
// mustBulk creates a single bulk inserter and terminates the service when
// creation fails; name is used only in the fatal error message.
func (conn *BulkSet) mustBulk(name, table, columns, template string, args, size int) Bulk {
	b, err := NewBulk(conn.c, table, columns, template, args, size)
	if err != nil {
		log.Fatalf("can't create %s bulk: %s", name, err)
	}
	return b
}

// initBulks (re)creates every per-table bulk inserter with its target table,
// column list, value template, argument count and a batch size of 200 rows.
// The original version repeated the NewBulk/Fatalf pattern 15 times and
// carried copy-pasted error messages (webInputEvents, webInputDurations,
// webGraphQL and webErrorTags all reported as other bulks); each bulk now
// fails with its own name.
func (conn *BulkSet) initBulks() {
	conn.autocompletes = conn.mustBulk("autocompletes",
		"autocomplete",
		"(value, type, project_id)",
		"($%d, $%d, $%d)",
		3, 200)
	conn.requests = conn.mustBulk("requests",
		"events_common.requests",
		"(session_id, timestamp, seq_index, url, duration, success)",
		"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
		6, 200)
	conn.customEvents = conn.mustBulk("customEvents",
		"events_common.customs",
		"(session_id, timestamp, seq_index, name, payload)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
		5, 200)
	conn.webPageEvents = conn.mustBulk("webPageEvents",
		"events.pages",
		"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
			"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
			"time_to_interactive, response_time, dom_building_time)",
		"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), "+
			"NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0),"+
			" NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0))",
		18, 200)
	conn.webInputEvents = conn.mustBulk("webInputEvents",
		"events.inputs",
		"(session_id, message_id, timestamp, label)",
		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''))",
		4, 200)
	conn.webInputDurations = conn.mustBulk("webInputDurations",
		"events.inputs",
		"(session_id, message_id, timestamp, label, hesitation, duration)",
		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)",
		6, 200)
	conn.webGraphQL = conn.mustBulk("webGraphQL",
		"events.graphql",
		"(session_id, timestamp, message_id, name, request_body, response_body)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
		6, 200)
	conn.webErrors = conn.mustBulk("webErrors",
		"errors",
		"(error_id, project_id, source, name, message, payload)",
		"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
		6, 200)
	conn.webErrorEvents = conn.mustBulk("webErrorEvents",
		"events.errors",
		"(session_id, message_id, timestamp, error_id)",
		"($%d, $%d, $%d, $%d)",
		4, 200)
	conn.webErrorTags = conn.mustBulk("webErrorTags",
		"public.errors_tags",
		"(session_id, message_id, error_id, key, value)",
		"($%d, $%d, $%d, $%d, $%d)",
		5, 200)
	conn.webIssues = conn.mustBulk("webIssues",
		"issues",
		"(project_id, issue_id, type, context_string)",
		"($%d, $%d, $%d, $%d)",
		4, 200)
	conn.webIssueEvents = conn.mustBulk("webIssueEvents",
		"events_common.issues",
		"(session_id, issue_id, timestamp, seq_index, payload)",
		"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
		5, 200)
	conn.webCustomEvents = conn.mustBulk("webCustomEvents",
		"events_common.customs",
		"(session_id, seq_index, timestamp, name, payload, level)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
		6, 200)
	conn.webClickEvents = conn.mustBulk("webClickEvents",
		"events.clicks",
		"(session_id, message_id, timestamp, label, selector, url, path, hesitation)",
		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
		8, 200)
	conn.webNetworkRequest = conn.mustBulk("webNetworkRequest",
		"events_common.requests",
		"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success)",
		"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)",
		13, 200)
}
// Send hands the current set of bulks to the background worker for flushing
// and replaces them with fresh, empty bulks.
func (conn *BulkSet) Send() {
	task := NewBulksTask()
	// Snapshot every bulk into the task in one variadic append.
	task.bulks = append(task.bulks,
		conn.autocompletes,
		conn.requests,
		conn.customEvents,
		conn.webPageEvents,
		conn.webInputEvents,
		conn.webInputDurations,
		conn.webGraphQL,
		conn.webErrors,
		conn.webErrorEvents,
		conn.webErrorTags,
		conn.webIssues,
		conn.webIssueEvents,
		conn.webCustomEvents,
		conn.webClickEvents,
		conn.webNetworkRequest,
	)
	conn.workerTask <- task
	// Start over with empty bulks while the worker flushes the snapshot.
	conn.initBulks()
}
// Stop signals the background worker to drain any queued task and exit,
// then blocks until the worker confirms it has finished.
func (conn *BulkSet) Stop() {
conn.done <- struct{}{}
<-conn.finished
}
// sendBulks flushes every bulk in the task to the database; failures are
// logged per bulk but not propagated, so one bad bulk doesn't block the rest.
func (conn *BulkSet) sendBulks(t *bulksTask) {
	for i := range t.bulks {
		b := t.bulks[i]
		if sendErr := b.Send(); sendErr != nil {
			log.Printf("%s bulk send err: %s", b.Table(), sendErr)
		}
	}
}
// worker processes queued flush tasks until Stop() is called, then drains
// whatever is still queued and signals completion on conn.finished.
//
// Bug fix: the previous shutdown path used `for t := range conn.workerTask`,
// but workerTask is never closed, so after consuming the buffered task the
// range would block forever — conn.finished was never signalled and Stop()
// dead-locked. The drain now uses a non-blocking select with a default case.
func (conn *BulkSet) worker() {
	for {
		select {
		case t := <-conn.workerTask:
			conn.sendBulks(t)
		case <-conn.done:
			for {
				select {
				case t := <-conn.workerTask:
					conn.sendBulks(t)
				default:
					// Queue is empty: acknowledge shutdown and exit.
					conn.finished <- struct{}{}
					return
				}
			}
		}
	}
}