openreplay/backend/cmd/heuristics/main.go
Alexander 4b8f3bee25
Sessions refactoring (#1371)
* feat(backend): moved sql requests related to sessions table to one place

* feat(backend): refactoring in db.Saver handler

* feat(backend): huge refactoring in db/postgres module

* fix(backend): workable feature flags

* fix(backend): workable integrations

* fix(backend): workable sessions and projects modules

* fix(backend): added missed projects module to sessions

* feat(backend): renaming

* feat(backend): moved session struct to sessions module and split methods into interface, cache and storage levels

* feat(backend): moved project struct to projects module

* feat(backend): added projects model

* feat(backend): implemented new in memory cache for sessions and projects

* feat(backend): implemented new cache in projects

* feat(backend): there are 2 methods in cache module now: Get() and GetAndRefresh()

* feat(backend): added cache update operations

* fix(backend): fixed import cycle

* fix(backend): fixed panic in db message handler

* fix(backend): fixed panic in projects module

* fix(backend): fixed panic in sessions.GetDuration

* feat(backend): added direct call to get session duration if session is already in cache

* feat(backend): used pg pool everywhere except db service

* fix(backend): added missing part after rebase

* fix(backend): removed old sessions file

* feat(backend): added refactored redis client with produce/consume options

* feat(backend): added cache layer for projects

* fix(backend): added missing redis config

* fix(backend): added missing method for producer

* feat(backend): cache integration for sessions

* feat(backend): temporary method to get session directly from db

* feat(backend): adapt EE version of message handler

* fix(backend): fixed issue in fts realisation

* fix(backend): added redis cache to sessions module

* fix(backend): set 0 duration or hesitation time for inputs without focus event

* feat(backend): added cache for session updates and failover mechanism for batch.Insert() operation

* feat(backend): debug log

* feat(backend): more debug log

* feat(backend): removed debug log

* fix(backend): fixed an issue of tracking input events with empty label

* fix(backend): disabled debug log in projects cache

* fix(backend): renamed session updater

* fix(backend): fixed closed pool issue in DB service

* fix(backend): fixed dead lock in db Stop() method

* fix(backend): fixed panic in heuristics service

* feat(backend): enabled redis cache in projects

* feat(backend): clear cache on each update operation

* feat(backend): fully integrated cache layer with auto switch

* feat(backend): small refactoring in session updates

* fix(backend): fixed wrong events counter issue

* feat(backend): enabled full cache support in ender and http services

* fix(backend/ee): added missed import

* feat(backend): added second cache layer for db to speed up the service

* feat(backend): disable redis cache

* feat(backend): moved redis cache to ee
2023-07-06 10:55:43 +02:00

64 lines
1.8 KiB
Go

package main
import (
"log"
config "openreplay/backend/internal/config/heuristics"
"openreplay/backend/internal/heuristics"
"openreplay/backend/pkg/builders"
"openreplay/backend/pkg/handlers"
"openreplay/backend/pkg/handlers/custom"
"openreplay/backend/pkg/handlers/web"
"openreplay/backend/pkg/memory"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics"
heuristicsMetrics "openreplay/backend/pkg/metrics/heuristics"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/terminator"
)
// main is the entry point of the heuristics service. It wires together the
// message pipeline: raw web messages are consumed from the queue, run through
// a set of event builders and issue detectors, and the resulting events are
// produced back to the queue. The process blocks until a TERM signal arrives.
func main() {
	// Register service-specific Prometheus metrics.
	m := metrics.New()
	m.Register(heuristicsMetrics.List())

	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
	cfg := config.New()

	// HandlersFabric returns a fresh list of message handlers to be applied to
	// each incoming message; a fabric (factory) is used because the builder map
	// needs an independent handler set per session.
	handlersFabric := func() []handlers.MessageProcessor {
		return []handlers.MessageProcessor{
			custom.NewInputEventBuilder(),
			custom.NewPageEventBuilder(),
			web.NewDeadClickDetector(),
			&web.ClickRageDetector{},
			&web.CpuIssueDetector{},
			&web.MemoryIssueDetector{},
			&web.NetworkIssueDetector{},
			&web.PerformanceAggregator{},
		}
	}

	eventBuilder := builders.NewBuilderMap(handlersFabric)
	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
	consumer := queue.NewConsumer(
		cfg.GroupHeuristics,
		[]string{
			cfg.TopicRawWeb,
		},
		messages.NewMessageIterator(eventBuilder.HandleMessage, nil, true),
		false,
		cfg.MessageSizeLimit,
	)

	// Init memory manager (guards against OOM by tracking the service's usage).
	memoryManager, err := memory.NewManager(cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
	if err != nil {
		// Fatalf exits with a non-zero status so orchestrators (k8s, systemd)
		// see a failed start as a failure; a plain `return` here would exit 0.
		log.Fatalf("can't init memory manager: %s", err)
	}

	// Run the service and block until a TERM signal is received.
	service := heuristics.New(cfg, producer, consumer, eventBuilder, memoryManager)
	log.Println("Heuristics service started")
	terminator.Wait(service)
	log.Println("Heuristics service stopped")
}