Fixed issue with wrong expiration timeout for projects (#1300)

* fix(backend): fixed wrong expiration timeout for projects in the cache layer (the conversion arithmetic is sketched below)

* fix(backend): removed direct call to db.GetProject()

* feat(backend): set default PROJECT_EXPIRATION to 10 minutes
Alexander 2023-06-08 18:08:53 +02:00 committed by GitHub
parent 99b196156c
commit 8e58c367dd
9 changed files with 30 additions and 27 deletions
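
For context on the bug itself: the old cache layer converted the millisecond setting with time.Duration(1000 * projectExpirationTimeoutMs) (see the cache hunk below). Since time.Duration counts nanoseconds, the old default of 1,200,000 ms came out as 1.2 seconds instead of the intended 20 minutes, so cached projects expired roughly a thousand times too early. A minimal, self-contained sketch of the arithmetic, using the old default from this diff:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Old default: PROJECT_EXPIRATION_TIMEOUT_MS=1200000, intended as 20 minutes.
	var projectExpirationTimeoutMs int64 = 1200000

	// Buggy conversion from the old code: time.Duration counts nanoseconds,
	// so multiplying milliseconds by 1000 yields microseconds' worth of ns.
	buggy := time.Duration(1000 * projectExpirationTimeoutMs)
	fmt.Println(buggy) // prints 1.2s (projects expired ~1000x too early)

	// What the old value should have been converted to, for comparison.
	correct := time.Duration(projectExpirationTimeoutMs) * time.Millisecond
	fmt.Println(correct) // prints 20m0s
}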


@@ -26,7 +26,7 @@ func main() {
 	// Init database
 	pg := cache.NewPGCache(
-		postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit), cfg.ProjectExpirationTimeoutMs)
+		postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit), cfg.ProjectExpiration)
 	defer pg.Close()
 	// Init data saver


@@ -31,7 +31,7 @@ func main() {
 	cfg := ender.New()
-	pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpirationTimeoutMs)
+	pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpiration)
 	defer pg.Close()
 	sessions, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)


@@ -32,7 +32,7 @@ func main() {
 	defer producer.Close(15000)
 	// Connect to database
-	dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), 1000*60*20)
+	dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpiration)
 	defer dbConn.Close()
 	// Build all services


@@ -9,17 +9,17 @@ import (
 type Config struct {
 	common.Config
 	common.Postgres
-	ProjectExpirationTimeoutMs int64         `env:"PROJECT_EXPIRATION_TIMEOUT_MS,default=1200000"`
-	LoggerTimeout              int           `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
-	GroupDB                    string        `env:"GROUP_DB,required"`
-	TopicRawWeb                string        `env:"TOPIC_RAW_WEB,required"`
-	TopicAnalytics             string        `env:"TOPIC_ANALYTICS,required"`
-	CommitBatchTimeout         time.Duration `env:"COMMIT_BATCH_TIMEOUT,default=15s"`
-	BatchQueueLimit            int           `env:"DB_BATCH_QUEUE_LIMIT,required"`
-	BatchSizeLimit             int           `env:"DB_BATCH_SIZE_LIMIT,required"`
-	UseQuickwit                bool          `env:"QUICKWIT_ENABLED,default=false"`
-	QuickwitTopic              string        `env:"QUICKWIT_TOPIC,default=saas-quickwit"`
-	UseProfiler                bool          `env:"PROFILER_ENABLED,default=false"`
+	ProjectExpiration  time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
+	LoggerTimeout      int           `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
+	GroupDB            string        `env:"GROUP_DB,required"`
+	TopicRawWeb        string        `env:"TOPIC_RAW_WEB,required"`
+	TopicAnalytics     string        `env:"TOPIC_ANALYTICS,required"`
+	CommitBatchTimeout time.Duration `env:"COMMIT_BATCH_TIMEOUT,default=15s"`
+	BatchQueueLimit    int           `env:"DB_BATCH_QUEUE_LIMIT,required"`
+	BatchSizeLimit     int           `env:"DB_BATCH_SIZE_LIMIT,required"`
+	UseQuickwit        bool          `env:"QUICKWIT_ENABLED,default=false"`
+	QuickwitTopic      string        `env:"QUICKWIT_TOPIC,default=saas-quickwit"`
+	UseProfiler        bool          `env:"PROFILER_ENABLED,default=false"`
 }
 func New() *Config {
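
The replacement field is a time.Duration parsed from a human-readable string (PROJECT_EXPIRATION=10m), which removes the unit-conversion step where the bug lived. The env-loading library OpenReplay uses isn't shown in this diff; the sketch below only illustrates, via a hypothetical loadProjectExpiration helper, how such a value is typically resolved with Go's time.ParseDuration:

package main

import (
	"fmt"
	"os"
	"time"
)

// loadProjectExpiration is a hypothetical helper, not OpenReplay's actual
// loader: read the variable, fall back to the struct-tag default, and parse.
func loadProjectExpiration() (time.Duration, error) {
	raw := os.Getenv("PROJECT_EXPIRATION")
	if raw == "" {
		raw = "10m" // default from the `env` tag above
	}
	return time.ParseDuration(raw) // accepts "10m", "90s", "1h30m", ...
}

func main() {
	d, err := loadProjectExpiration()
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // prints 10m0s when no override is set
}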


@@ -3,19 +3,20 @@ package ender
 import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
+	"time"
 )
 type Config struct {
 	common.Config
 	common.Postgres
-	ProjectExpirationTimeoutMs int64  `env:"PROJECT_EXPIRATION_TIMEOUT_MS,default=1200000"`
-	GroupEnder                 string `env:"GROUP_ENDER,required"`
-	LoggerTimeout              int    `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
-	TopicRawWeb                string `env:"TOPIC_RAW_WEB,required"`
-	ProducerTimeout            int    `env:"PRODUCER_TIMEOUT,default=2000"`
-	PartitionsNumber           int    `env:"PARTITIONS_NUMBER,required"`
-	UseEncryption              bool   `env:"USE_ENCRYPTION,default=false"`
-	UseProfiler                bool   `env:"PROFILER_ENABLED,default=false"`
+	ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
+	GroupEnder        string        `env:"GROUP_ENDER,required"`
+	LoggerTimeout     int           `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
+	TopicRawWeb       string        `env:"TOPIC_RAW_WEB,required"`
+	ProducerTimeout   int           `env:"PRODUCER_TIMEOUT,default=2000"`
+	PartitionsNumber  int           `env:"PARTITIONS_NUMBER,required"`
+	UseEncryption     bool          `env:"USE_ENCRYPTION,default=false"`
+	UseProfiler       bool          `env:"PROFILER_ENABLED,default=false"`
 }
 func New() *Config {


@@ -24,6 +24,7 @@ type Config struct {
 	MaxMinDBFile            string        `env:"MAXMINDDB_FILE,required"`
 	UseProfiler             bool          `env:"PROFILER_ENABLED,default=false"`
 	UseAccessControlHeaders bool          `env:"USE_CORS,default=false"`
+	ProjectExpiration       time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
 	WorkerID                uint16
 }


@@ -35,12 +35,12 @@ type cacheImpl struct {
 	projectExpirationTimeout time.Duration
 }
-func NewCache(conn *postgres.Conn, projectExpirationTimeoutMs int64) Cache {
+func NewCache(conn *postgres.Conn, projectExpiration time.Duration) Cache {
 	newCache := &cacheImpl{
 		conn:                     conn,
 		sessions:                 make(map[uint64]*SessionMeta),
 		projects:                 make(map[uint32]*ProjectMeta),
-		projectExpirationTimeout: time.Duration(1000 * projectExpirationTimeoutMs),
+		projectExpirationTimeout: projectExpiration,
 	}
 	go newCache.cleaner()
 	return newCache
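
NewCache also starts a cleaner() goroutine whose body isn't part of this diff. As a rough illustration of how such a sweep typically applies the expiration window, here is a self-contained sketch; expiringCache and its fields are illustrative stand-ins, not the real cacheImpl:

package main

import (
	"fmt"
	"sync"
	"time"
)

// expiringCache is an illustrative stand-in: entries older than ttl are
// swept by a background goroutine, analogous to the diff's cleaner().
type expiringCache struct {
	mu      sync.Mutex
	ttl     time.Duration
	entries map[uint32]time.Time // value: last-touched timestamp
}

func newExpiringCache(ttl time.Duration) *expiringCache {
	c := &expiringCache{ttl: ttl, entries: make(map[uint32]time.Time)}
	go c.cleaner()
	return c
}

// cleaner periodically evicts entries that outlived the expiration window.
func (c *expiringCache) cleaner() {
	for range time.Tick(c.ttl / 2) {
		c.mu.Lock()
		for id, touched := range c.entries {
			if time.Since(touched) > c.ttl {
				delete(c.entries, id)
			}
		}
		c.mu.Unlock()
	}
}

func (c *expiringCache) touch(id uint32) {
	c.mu.Lock()
	c.entries[id] = time.Now()
	c.mu.Unlock()
}

func main() {
	c := newExpiringCache(100 * time.Millisecond)
	c.touch(42)
	time.Sleep(300 * time.Millisecond)
	c.mu.Lock()
	fmt.Println(len(c.entries)) // prints 0: the entry for project 42 was evicted
	c.mu.Unlock()
}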


@@ -2,6 +2,7 @@ package cache
 import (
 	"openreplay/backend/pkg/db/postgres"
+	"time"
 )
 type PGCache struct {
@@ -9,9 +10,9 @@ type PGCache struct {
 	Cache Cache
 }
-func NewPGCache(conn *postgres.Conn, projectExpirationTimeoutMs int64) *PGCache {
+func NewPGCache(conn *postgres.Conn, projectExpiration time.Duration) *PGCache {
 	// Create in-memory cache layer for sessions and projects
-	c := NewCache(conn, projectExpirationTimeoutMs)
+	c := NewCache(conn, projectExpiration)
 	// Return PG wrapper with integrated cache layer
 	return &PGCache{
 		Conn: conn,


@@ -68,7 +68,7 @@ func (s *saverImpl) handleExtraMessage(msg Message) error {
 	case *CustomEvent:
 		return s.ch.InsertCustom(session, m)
 	case *NetworkRequest:
-		project, err := s.pg.GetProject(session.ProjectID)
+		project, err := s.pg.Cache.GetProject(session.ProjectID)
 		if err != nil {
 			log.Printf("can't get project: %s", err)
 		} else {
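
On this last hunk: s.pg is the PGCache wrapper, so routing the lookup through s.pg.Cache.GetProject serves repeated NetworkRequest lookups from memory instead of querying Postgres each time. A simplified, self-contained sketch of that dispatch difference; the types and fields here are illustrative, not OpenReplay's actual ones:

package main

import "fmt"

// Simplified stand-ins for the real types; names are illustrative only.
type Project struct{ ID uint32 }

type Conn struct{ queries int }

// GetProject on the raw connection always round-trips to the database.
func (c *Conn) GetProject(id uint32) (*Project, error) {
	c.queries++
	return &Project{ID: id}, nil
}

type Cache struct {
	conn     *Conn
	projects map[uint32]*Project
}

// GetProject on the cache answers from memory, hitting Postgres only on a miss.
func (c *Cache) GetProject(id uint32) (*Project, error) {
	if p, ok := c.projects[id]; ok {
		return p, nil
	}
	p, err := c.conn.GetProject(id)
	if err == nil {
		c.projects[id] = p
	}
	return p, err
}

func main() {
	conn := &Conn{}
	cache := &Cache{conn: conn, projects: make(map[uint32]*Project)}
	for i := 0; i < 3; i++ {
		cache.GetProject(7)
	}
	fmt.Println(conn.queries) // prints 1: two of the three lookups hit the in-memory map
}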