openreplay/backend/pkg/integrations/integrations.go
Alexander 4b8f3bee25
Sessions refactoring (#1371)
* feat(backend): moved sql requests related to sessions table to one place

* feat(backend): refactoring in db.Saver handler

* feat(backend): huge refactoring in db/postgres module

* fix(backend): workable feature flags

* fix(backend): workable integrations

* fix(backend): workable sessions and projects modules

* fix(backend): added missing projects module to sessions

* feat(backend): renaming

* feat(backend): moved session struct to sessions module and split methods into interface, cache and storage levels

* feat(backend): moved project struct to projects module

* feat(backend): added projects model

* feat(backend): implemented new in memory cache for sessions and projects

* feat(backend): implemented new cache in projects

* feat(backend): there are 2 methods in cache module now: Get() and GetAndRefresh()

* feat(backend): added cache update operations

* fix(backend): fixed import cycle

* fix(backend): fixed panic in db message handler

* fix(backend): fixed panic in projects module

* fix(backend): fixed panic in sessions.GetDuration

* feat(backend): added direct call to get session duration if session is already in cache

* feat(backend): used pg pool everywhere except db service

* fix(backend): added missing part after rebase

* fix(backend): removed old sessions file

* feat(backend): added refactored redis client with produce/consume options

* feat(backend): added cache layer for projects

* fix(backend): added missing redis config

* fix(backend): added missing method for producer

* feat(backend): cache integration for sessions

* feat(backend): temporary method to get session directly from db

* feat(backend): adapt EE version of message handler

* fix(backend): fixed issue in fts realisation

* fix(backend): added redis cache to sessions module

* fix(backend): set 0 duration or hesitation time for inputs without focus event

* feat(backend): added cache for session updates and failover mechanism for batch.Insert() operation

* feat(backend): debug log

* feat(backend): more debug log

* feat(backend): removed debug log

* fix(backend): fixed an issue of tracking input events with empty label

* fix(backend): disabled debug log in projects cache

* fix(backend): renamed session updater

* fix(backend): fixed closed pool issue in DB service

* fix(backend): fixed deadlock in db Stop() method

* fix(backend): fixed panic in heuristics service

* feat(backend): enabled redis cache in projects

* feat(backend): clear cache on each update operation

* feat(backend): fully integrated cache layer with auto switch

* feat(backend): small refactoring in session updates

* fix(backend): fixed wrong events counter issue

* feat(backend): enabled full cache support in ender and http services

* fix(backend/ee): added missing import

* feat(backend): added second cache layer for db to speed up the service

* feat(backend): disable redis cache

* feat(backend): moved redis cache to ee
2023-07-06 10:55:43 +02:00

package integrations

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/jackc/pgx/v4"

	"openreplay/backend/pkg/db/postgres/pool"
)

// Listener keeps a dedicated postgres connection for LISTEN/NOTIFY and the
// shared pool for regular queries. Decoded notifications are delivered on the
// Integrations channel; connection and decode failures are reported on Errors.
type Listener struct {
	conn         *pgx.Conn
	db           pool.Pool
	Integrations chan *Integration
	Errors       chan error
}
// Integration mirrors one row of the integrations table and the JSON payload
// sent on the "integration" notification channel.
type Integration struct {
	ProjectID   uint32          `json:"project_id"`
	Provider    string          `json:"provider"`
	RequestData json.RawMessage `json:"request_data"`
	Options     json.RawMessage `json:"options"`
}
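
// Based on the json tags above, a notification payload is expected to look
// roughly like the following; the concrete values are illustrative and not
// taken from the original code:
//
//	{
//	    "project_id": 1,
//	    "provider": "some_provider",
//	    "request_data": {},
//	    "options": {}
//	}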
// New opens a dedicated connection to postgres, subscribes to the
// "integration" channel and starts a background goroutine that forwards
// incoming notifications to the Integrations channel.
func New(db pool.Pool, url string) (*Listener, error) {
	conn, err := pgx.Connect(context.Background(), url)
	if err != nil {
		return nil, err
	}
	listener := &Listener{
		conn:         conn,
		db:           db,
		Errors:       make(chan error),
		Integrations: make(chan *Integration, 50),
	}
	if _, err := conn.Exec(context.Background(), "LISTEN integration"); err != nil {
		return nil, err
	}
	go listener.listen()
	return listener, nil
}
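
// The listener only consumes notifications; however the backend emits them
// (from a trigger or from application code), a payload can be produced
// manually for testing with a statement along these lines (illustrative SQL,
// not part of this package):
//
//	SELECT pg_notify('integration',
//	    '{"project_id": 1, "provider": "some_provider", "request_data": {}, "options": {}}');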
// listen blocks on WaitForNotification and pushes every decoded payload from
// the "integration" channel to Listener.Integrations. Errors are reported on
// Listener.Errors instead of stopping the loop.
func (listener *Listener) listen() {
	for {
		notification, err := listener.conn.WaitForNotification(context.Background())
		if err != nil {
			listener.Errors <- err
			continue
		}
		switch notification.Channel {
		case "integration":
			integrationP := new(Integration)
			if err := json.Unmarshal([]byte(notification.Payload), integrationP); err != nil {
				listener.Errors <- fmt.Errorf("%v | Payload: %v", err, notification.Payload)
			} else {
				listener.Integrations <- integrationP
			}
		}
	}
}
// Close terminates the dedicated LISTEN connection.
func (listener *Listener) Close() error {
	return listener.conn.Close(context.Background())
}
// IterateIntegrationsOrdered reads all rows from the integrations table and
// calls iter once per row; a scan failure is passed to iter as an error so a
// single bad row does not abort the whole iteration.
func (listener *Listener) IterateIntegrationsOrdered(iter func(integration *Integration, err error)) error {
	rows, err := listener.db.Query(`
		SELECT project_id, provider, options, request_data
		FROM integrations
	`)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		i := new(Integration)
		if err := rows.Scan(&i.ProjectID, &i.Provider, &i.Options, &i.RequestData); err != nil {
			iter(nil, err)
			continue
		}
		iter(i, nil)
	}
	return rows.Err()
}
// UpdateIntegrationRequestData persists the (possibly refreshed) request_data
// blob for the given project/provider pair.
func (listener *Listener) UpdateIntegrationRequestData(i *Integration) error {
	return listener.db.Exec(`
		UPDATE integrations
		SET request_data = $1
		WHERE project_id = $2 AND provider = $3`,
		i.RequestData, i.ProjectID, i.Provider,
	)
}
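
// Usage sketch (not from the original code): a consumer would typically seed
// its state from IterateIntegrationsOrdered once at startup and then react to
// live changes from the two channels. Names such as pgPool, postgresURL and
// handleIntegration are placeholders, not identifiers from this repository.
//
//	listener, err := integrations.New(pgPool, postgresURL)
//	if err != nil {
//	    log.Fatal(err)
//	}
//	err = listener.IterateIntegrationsOrdered(func(i *integrations.Integration, err error) {
//	    if err != nil {
//	        log.Printf("bad integrations row: %v", err)
//	        return
//	    }
//	    handleIntegration(i)
//	})
//	if err != nil {
//	    log.Fatal(err)
//	}
//	for {
//	    select {
//	    case i := <-listener.Integrations:
//	        handleIntegration(i)
//	    case err := <-listener.Errors:
//	        log.Printf("integration listener error: %v", err)
//	    }
//	}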