openreplay/backend/pkg/db/postgres/connector.go
Alexander 4b8f3bee25
Sessions refactoring (#1371)
* feat(backend): moved sql requests related to sessions table to one place

* feat(backend): refactoring in db.Saver handler

* feat(backend): huge refactoring in db/postgres module

* fix(backend): working feature flags

* fix(backend): working integrations

* fix(backend): working sessions and projects modules

* fix(backend): added missing projects module to sessions

* feat(backend): renaming

* feat(backend): moved session struct to sessions module and split methods into interface, cache and storage levels

* feat(backend): moved project struct to projects module

* feat(backend): added projects model

* feat(backend): implemented new in memory cache for sessions and projects

* feat(backend): implemented new cache in projects

* feat(backend): the cache module now exposes two methods: Get() and GetAndRefresh() (a sketch of this contract follows the commit list)

* feat(backend): added cache update operations

* fix(backend): fixed import cycle

* fix(backend): fixed panic in db message handler

* fix(backend): fixed panic in projects module

* fix(backend): fixed panic in sessions.GetDuration

* feat(backend): added direct call to get session duration if session is already in cache

* feat(backend): used pg pool everywhere except db service

* fix(backend): added missing part after rebase

* fix(backend): removed old sessions file

* feat(backend): added refactored redis client with produce/consume options

* feat(backend): added cache layer for projects

* fix(backend): added missing redis config

* fix(backend): added missing method for producer

* feat(backend): cache integration for sessions

* feat(backend): temporary method to get session directly from db

* feat(backend): adapted EE version of message handler

* fix(backend): fixed issue in fts implementation

* fix(backend): added redis cache to sessions module

* fix(backend): set 0 duration or hesitation time for inputs without focus event

* feat(backend): added cache for session updates and failover mechanism for batch.Insert() operation

* feat(backend): debug log

* feat(backend): more debug log

* feat(backend): removed debug log

* fix(backend): fixed an issue of tracking input events with empty label

* fix(backend): disabled debug log in projects cache

* fix(backend): renamed session updater

* fix(backend): fixed closed pool issue in DB service

* fix(backend): fixed deadlock in db Stop() method

* fix(backend): fixed panic in heuristics service

* feat(backend): enabled redis cache in projects

* feat(backend): clear cache on each update operation

* feat(backend): fully integrated cache layer with auto switch

* feat(backend): small refactoring in session updates

* fix(backend): fixed incorrect events counter

* feat(backend): enabled full cache support in ender and http services

* fix(backend/ee): added missing import

* feat(backend): added second cache layer for db to speed up the service

* feat(backend): disable redis cache

* feat(backend): moved redis cache to ee
2023-07-06 10:55:43 +02:00
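
The Get()/GetAndRefresh() split mentioned in the commit list can be pictured as a small cache contract. The sketch below is illustrative only: the method names come from the commit message, while the signatures, the *sessions.Session return type and the TTL-refresh behaviour are assumptions rather than the actual openreplay API.

package cache

import "openreplay/backend/pkg/sessions"

// Sessions sketches the two-method cache contract named in the commit
// message; the signatures and return types here are assumptions, not the
// real openreplay API.
type Sessions interface {
	// Get looks a session up without changing its cache lifetime.
	Get(sessionID uint64) (*sessions.Session, error)
	// GetAndRefresh looks a session up and also extends its lifetime
	// (for example by resetting a TTL), so frequently used sessions stay cached.
	GetAndRefresh(sessionID uint64) (*sessions.Session, error)
}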


package postgres

import (
	"log"

	"openreplay/backend/pkg/db/postgres/batch"
	"openreplay/backend/pkg/db/postgres/pool"
	"openreplay/backend/pkg/sessions"
)

type CH interface {
	InsertAutocomplete(session *sessions.Session, msgType, msgValue string) error
}

// Conn contains batches, bulks and cache for all sessions
type Conn struct {
	Pool    pool.Pool
	batches *batch.BatchSet
	bulks   *BulkSet
	chConn  CH // hack for autocomplete inserts, TODO: rewrite
}

func (conn *Conn) SetClickHouse(ch CH) {
	conn.chConn = ch
}

func NewConn(pool pool.Pool) *Conn {
	if pool == nil {
		log.Fatalf("pool is nil")
	}
	return &Conn{
		Pool:    pool,
		bulks:   NewBulkSet(pool),
		batches: batch.NewBatchSet(pool),
	}
}

func (conn *Conn) Close() error {
	conn.bulks.Stop()
	conn.batches.Stop()
	return nil
}

func (conn *Conn) InsertAutocompleteValue(sessionID uint64, projectID uint32, tp string, value string) {
	if len(value) == 0 {
		return
	}
	if err := conn.bulks.Get("autocompletes").Append(value, tp, projectID); err != nil {
		log.Printf("autocomplete bulk err: %s", err)
	}
	if conn.chConn == nil {
		return
	}
	// Send autocomplete data to clickhouse
	if err := conn.chConn.InsertAutocomplete(&sessions.Session{SessionID: sessionID, ProjectID: projectID}, tp, value); err != nil {
		log.Printf("click house autocomplete err: %s", err)
	}
}

func (conn *Conn) BatchQueue(sessionID uint64, sql string, args ...interface{}) {
	conn.batches.BatchQueue(sessionID, sql, args...)
}

func (conn *Conn) Commit() {
	conn.bulks.Send()
	conn.batches.Commit()
}
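
For orientation, a minimal usage sketch of the connector above. Only NewConn, InsertAutocompleteValue, BatchQueue, Commit and Close come from the file itself; the pool.New constructor, the connection string and the SQL text are assumptions made for this example and may not match the real openreplay code.

package main

import (
	"log"

	"openreplay/backend/pkg/db/postgres"
	"openreplay/backend/pkg/db/postgres/pool"
)

func main() {
	// pool.New and the DSN are assumptions for this sketch; the real pool
	// constructor in openreplay may have a different name or signature.
	pgPool, err := pool.New("postgres://user:pass@localhost:5432/openreplay")
	if err != nil {
		log.Fatalf("can't create pg pool: %s", err)
	}

	conn := postgres.NewConn(pgPool)
	defer conn.Close()

	sessionID, projectID := uint64(42), uint32(1)

	// Appends to the "autocompletes" bulk and mirrors the value to
	// ClickHouse if SetClickHouse was called earlier.
	conn.InsertAutocompleteValue(sessionID, projectID, "CLICK", "#submit")

	// Queues a per-session statement into the batch set; the SQL text is illustrative.
	conn.BatchQueue(sessionID,
		"INSERT INTO events_common.customs (session_id, name) VALUES ($1, $2)",
		sessionID, "checkout")

	// Flushes all pending bulks and batches to Postgres.
	conn.Commit()
}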