JSON logs format (#1952)
* feat(backend): try a new approach for logs formatting (http)
* feat(backend): added logger module
* feat(backend): added project/session info to /i endpoint
* feat(backend): found a solution for correct caller information
* feat(backend): finished logs for http handlers
* feat(backend): finished logs for mobile http handlers
* feat(backend): finished ender
* feat(backend): finished assets
* feat(backend): finished heuristics
* feat(backend): finished image-storage
* feat(backend): finished sink
* feat(backend): finished storage
* feat(backend): formatted logs in all services
* feat(backend): finished foss part
* feat(backend): added missed foss part
* feat(backend): fixed panic in memory manager and sink service
* feat(backend): connectors
Parent: 7e83cd0f2b
Commit: 45c956c489
92 changed files with 1224 additions and 992 deletions
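The diffs below replace the standard library log package in every backend service with a shared pkg/logger module. Its shape can be read off the call sites: logger.New() builds the logger; each method (Info, Warn, Error, Fatal) takes a context.Context plus a printf-style format; and handlers stash a session ID in the context so it can be emitted as a structured field. go.mod adds go.uber.org/zap as a direct dependency, so zap is presumably the backing engine. A minimal sketch of such a module, assuming zap: the interface is inferred from the call sites in this commit, while the zap wiring (production JSON encoder, AddCallerSkip for correct caller information) is an illustrative guess, not the actual pkg/logger code.

	package logger

	import (
		"context"
		"fmt"

		"go.uber.org/zap"
	)

	// Logger is the context-aware facade every service in this commit receives.
	type Logger interface {
		Info(ctx context.Context, format string, args ...interface{})
		Warn(ctx context.Context, format string, args ...interface{})
		Error(ctx context.Context, format string, args ...interface{})
		Fatal(ctx context.Context, format string, args ...interface{})
	}

	type zapLogger struct {
		l *zap.Logger
	}

	// New builds a JSON logger. AddCallerSkip(1) makes zap report the caller of
	// Info/Error/... rather than this wrapper — one way to solve the "correct
	// caller information" problem the commit message mentions.
	func New() Logger {
		core, err := zap.NewProductionConfig().Build(zap.AddCaller(), zap.AddCallerSkip(1))
		if err != nil {
			panic(err)
		}
		return &zapLogger{l: core}
	}

	// fields lifts request-scoped values out of the context into JSON fields;
	// "sessionID" is the key handlers in this diff attach via context.WithValue.
	func fields(ctx context.Context) []zap.Field {
		if sessID := ctx.Value("sessionID"); sessID != nil {
			return []zap.Field{zap.Any("sessionID", sessID)}
		}
		return nil
	}

	func (z *zapLogger) Info(ctx context.Context, format string, args ...interface{}) {
		z.l.Info(fmt.Sprintf(format, args...), fields(ctx)...)
	}

	func (z *zapLogger) Warn(ctx context.Context, format string, args ...interface{}) {
		z.l.Warn(fmt.Sprintf(format, args...), fields(ctx)...)
	}

	func (z *zapLogger) Error(ctx context.Context, format string, args ...interface{}) {
		z.l.Error(fmt.Sprintf(format, args...), fields(ctx)...)
	}

	func (z *zapLogger) Fatal(ctx context.Context, format string, args ...interface{}) {
		z.l.Fatal(fmt.Sprintf(format, args...), fields(ctx)...)
	}

Usage then matches the call sites below, e.g. cfg := config.New(log) followed by log.Info(ctx, "Cacher service started").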
@@ -1,7 +1,8 @@
 package main

 import (
-	"log"
+	"context"
+	"openreplay/backend/pkg/logger"
 	"os"
 	"os/signal"
 	"syscall"
@@ -18,20 +19,18 @@ import (
 )

 func main() {
-	m := metrics.New()
-	m.Register(assetsMetrics.List())
-
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := config.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := config.New(log)
+	metrics.New(log, assetsMetrics.List())

 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
-		log.Fatalf("Error on object storage creation: %v", err)
+		log.Fatal(ctx, "can't init object storage: %s", err)
 	}
 	cacher, err := cacher.NewCacher(cfg, objStore)
 	if err != nil {
-		log.Fatalf("Error on cacher creation: %v", err)
+		log.Fatal(ctx, "can't init cacher: %s", err)
 	}

 	msgHandler := func(msg messages.Message) {
@@ -39,11 +38,10 @@ func main() {
 		case *messages.AssetCache:
 			cacher.CacheURL(m.SessionID(), m.URL)
 			assetsMetrics.IncreaseProcessesSessions()
-		// TODO: connect to "raw" topic in order to listen for JSException
 		case *messages.JSException:
 			sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
 			if err != nil {
-				log.Printf("Error on source extraction: %v", err)
+				log.Error(ctx, "Error on source extraction: %s", err)
 				return
 			}
 			for _, source := range sourceList {
@@ -55,12 +53,12 @@ func main() {
 	msgConsumer := queue.NewConsumer(
 		cfg.GroupCache,
 		[]string{cfg.TopicCache},
-		messages.NewMessageIterator(msgHandler, []int{messages.MsgAssetCache, messages.MsgJSException}, true),
+		messages.NewMessageIterator(log, msgHandler, []int{messages.MsgAssetCache, messages.MsgJSException}, true),
 		true,
 		cfg.MessageSizeLimit,
 	)

-	log.Printf("Cacher service started\n")
+	log.Info(ctx, "Cacher service started")

 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -69,22 +67,22 @@ func main() {
 	for {
 		select {
 		case sig := <-sigchan:
-			log.Printf("Caught signal %v: terminating\n", sig)
+			log.Error(ctx, "Caught signal %v: terminating", sig)
 			cacher.Stop()
 			msgConsumer.Close()
 			os.Exit(0)
 		case err := <-cacher.Errors:
-			log.Printf("Error while caching: %v", err)
+			log.Error(ctx, "Error while caching: %s", err)
 		case <-tick:
 			cacher.UpdateTimeouts()
 		case msg := <-msgConsumer.Rebalanced():
-			log.Println(msg)
+			log.Info(ctx, "Rebalanced: %v", msg)
 		default:
 			if !cacher.CanCache() {
 				continue
 			}
 			if err := msgConsumer.ConsumeNext(); err != nil {
-				log.Fatalf("Error on consumption: %v", err)
+				log.Fatal(ctx, "Error on consumption: %v", err)
 			}
 		}
 	}

@@ -1,7 +1,7 @@
 package main

 import (
-	"log"
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
@@ -9,6 +9,7 @@ import (

 	"openreplay/backend/internal/canvas-handler"
 	config "openreplay/backend/internal/config/canvas-handler"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
 	storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
@@ -17,22 +18,19 @@ import (
 )

 func main() {
-	m := metrics.New()
-	m.Register(storageMetrics.List())
-
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := config.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := config.New(log)
+	metrics.New(log, storageMetrics.List())

 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
-		log.Fatalf("can't init object storage: %s", err)
+		log.Fatal(ctx, "can't init object storage: %s", err)
 	}

-	srv, err := canvas_handler.New(cfg, objStore)
+	srv, err := canvas_handler.New(cfg, log, objStore)
 	if err != nil {
-		log.Printf("can't init storage service: %s", err)
-		return
+		log.Fatal(ctx, "can't init canvas service: %s", err)
 	}

 	canvasConsumer := queue.NewConsumer(
@@ -56,14 +54,15 @@ func main() {
 			}
 			return true
 		}
+		sessCtx := context.WithValue(context.Background(), "sessionID", sessID)

 		if isSessionEnd(data) {
-			if err := srv.PackSessionCanvases(sessID); err != nil {
-				log.Printf("can't prepare canvas: %s", err)
+			if err := srv.PackSessionCanvases(sessCtx, sessID); err != nil {
+				log.Error(sessCtx, "can't pack session's canvases: %s", err)
 			}
 		} else {
-			if err := srv.SaveCanvasToDisk(sessID, data); err != nil {
-				log.Printf("can't process canvas image: %s", err)
+			if err := srv.SaveCanvasToDisk(sessCtx, sessID, data); err != nil {
+				log.Error(sessCtx, "can't process canvas image: %s", err)
 			}
 		}
 	}, nil, true),
@@ -71,7 +70,7 @@ func main() {
 	cfg.MessageSizeLimit,
 	)

-	log.Printf("Canvas handler service started\n")
+	log.Info(ctx, "canvas handler service started")

 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -80,21 +79,21 @@ func main() {
 	for {
 		select {
 		case sig := <-sigchan:
-			log.Printf("Caught signal %v: terminating\n", sig)
+			log.Info(ctx, "caught signal %v: terminating", sig)
 			srv.Wait()
 			canvasConsumer.Close()
 			os.Exit(0)
 		case <-counterTick:
 			srv.Wait()
 			if err := canvasConsumer.Commit(); err != nil {
-				log.Printf("can't commit messages: %s", err)
+				log.Error(ctx, "can't commit messages: %s", err)
 			}
 		case msg := <-canvasConsumer.Rebalanced():
-			log.Println(msg)
+			log.Info(ctx, "consumer group rebalanced: %+v", msg)
 		default:
 			err = canvasConsumer.ConsumeNext()
 			if err != nil {
-				log.Fatalf("Error on images consumption: %v", err)
+				log.Fatal(ctx, "can't consume next message: %s", err)
 			}
 		}
 	}

@@ -1,13 +1,15 @@
 package main

 import (
-	"log"
+	"context"

 	config "openreplay/backend/internal/config/db"
 	"openreplay/backend/internal/db"
 	"openreplay/backend/internal/db/datasaver"
 	"openreplay/backend/pkg/db/postgres"
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/db/redis"
+	"openreplay/backend/pkg/logger"
+	"openreplay/backend/pkg/memory"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
@@ -20,38 +22,34 @@ import (
 )

 func main() {
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
+	ctx := context.Background()
+	log := logger.New()
+	cfg := config.New(log)
+	metrics.New(log, databaseMetrics.List())

-	m := metrics.New()
-	m.Register(databaseMetrics.List())
-
-	cfg := config.New()
-
 	// Init postgres connection
 	pgConn, err := pool.New(cfg.Postgres.String())
 	if err != nil {
-		log.Printf("can't init postgres connection: %s", err)
-		return
+		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close()

 	// Init events module
-	pg := postgres.NewConn(pgConn)
+	pg := postgres.NewConn(log, pgConn)
 	defer pg.Close()

 	// Init redis connection
 	redisClient, err := redis.New(&cfg.Redis)
 	if err != nil {
-		log.Printf("can't init redis connection: %s", err)
+		log.Warn(ctx, "can't init redis connection: %s", err)
 	}
 	defer redisClient.Close()

-	projManager := projects.New(pgConn, redisClient)
-	sessManager := sessions.New(pgConn, projManager, redisClient)
-	tagsManager := tags.New(pgConn)
+	projManager := projects.New(log, pgConn, redisClient)
+	sessManager := sessions.New(log, pgConn, projManager, redisClient)
+	tagsManager := tags.New(log, pgConn)

 	// Init data saver
-	saver := datasaver.New(cfg, pg, sessManager, tagsManager)
+	saver := datasaver.New(log, cfg, pg, sessManager, tagsManager)

 	// Message filter
 	msgFilter := []int{
@@ -78,21 +76,19 @@ func main() {
 		cfg.TopicRawIOS,
 		cfg.TopicAnalytics,
 	},
-	messages.NewMessageIterator(saver.Handle, msgFilter, true),
+	messages.NewMessageIterator(log, saver.Handle, msgFilter, true),
 	false,
 	cfg.MessageSizeLimit,
 	)

 	// Init memory manager
-	memoryManager, err := memory.NewManager(cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
+	memoryManager, err := memory.NewManager(log, cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
 	if err != nil {
-		log.Printf("can't init memory manager: %s", err)
-		return
+		log.Fatal(ctx, "can't init memory manager: %s", err)
 	}

 	// Run service and wait for TERM signal
-	service := db.New(cfg, consumer, saver, memoryManager, sessManager)
-	log.Printf("Db service started\n")
-	terminator.Wait(service)
-	log.Printf("Db service stopped\n")
+	service := db.New(log, cfg, consumer, saver, memoryManager, sessManager)
+	log.Info(ctx, "Db service started")
+	terminator.Wait(log, service)
 }

@@ -1,9 +1,11 @@
 package main

 import (
-	"log"
+	"context"
+	"fmt"
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/db/redis"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/memory"
 	"openreplay/backend/pkg/projects"
 	"openreplay/backend/pkg/queue/types"
@@ -26,36 +28,29 @@ func main() {
 )

 func main() {
-	m := metrics.New()
-	m.Register(enderMetrics.List())
-	m.Register(databaseMetrics.List())
+	ctx := context.Background()
+	log := logger.New()
+	cfg := ender.New(log)
+	metrics.New(log, append(enderMetrics.List(), databaseMetrics.List()...))

-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := ender.New()
-
 	// Init postgres connection
 	pgConn, err := pool.New(cfg.Postgres.String())
 	if err != nil {
-		log.Printf("can't init postgres connection: %s", err)
-		return
+		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close()

 	// Init redis connection
 	redisClient, err := redis.New(&cfg.Redis)
 	if err != nil {
-		log.Printf("can't init redis connection: %s", err)
+		log.Warn(ctx, "can't init redis connection: %s", err)
 	}
 	defer redisClient.Close()

-	projManager := projects.New(pgConn, redisClient)
-	sessManager := sessions.New(pgConn, projManager, redisClient)
+	projManager := projects.New(log, pgConn, redisClient)
+	sessManager := sessions.New(log, pgConn, projManager, redisClient)

 	sessionEndGenerator, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
 	if err != nil {
-		log.Printf("can't init ender service: %s", err)
-		return
+		log.Fatal(ctx, "can't init ender service: %s", err)
 	}

 	mobileMessages := []int{90, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 107, 110, 111}
@@ -68,6 +63,7 @@ func main() {
 		cfg.TopicRawIOS,
 	},
 	messages.NewEnderMessageIterator(
+		log,
 		func(msg messages.Message) { sessionEndGenerator.UpdateSession(msg) },
 		append([]int{messages.MsgTimestamp}, mobileMessages...),
 		false),
@@ -75,13 +71,12 @@ func main() {
 	cfg.MessageSizeLimit,
 	)

-	memoryManager, err := memory.NewManager(cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
+	memoryManager, err := memory.NewManager(log, cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
 	if err != nil {
-		log.Printf("can't init memory manager: %s", err)
-		return
+		log.Fatal(ctx, "can't init memory manager: %s", err)
 	}

-	log.Printf("Ender service started\n")
+	log.Info(ctx, "Ender service started")

 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -90,10 +85,10 @@ func main() {
 	for {
 		select {
 		case sig := <-sigchan:
-			log.Printf("Caught signal %v: terminating\n", sig)
+			log.Info(ctx, "Caught signal %v: terminating", sig)
 			producer.Close(cfg.ProducerTimeout)
 			if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
-				log.Printf("can't commit messages with offset: %s", err)
+				log.Error(ctx, "can't commit messages with offset: %s", err)
 			}
 			consumer.Close()
 			os.Exit(0)
@@ -120,14 +115,15 @@ func main() {

 		// Find ended sessions and send notification to other services
 		sessionEndGenerator.HandleEndedSessions(func(sessionID uint64, timestamp uint64) (bool, int) {
+			sessCtx := context.WithValue(context.Background(), "sessionID", fmt.Sprintf("%d", sessionID))
 			msg := &messages.SessionEnd{Timestamp: timestamp}
 			currDuration, err := sessManager.GetDuration(sessionID)
 			if err != nil {
-				log.Printf("getSessionDuration failed, sessID: %d, err: %s", sessionID, err)
+				log.Error(sessCtx, "getSessionDuration failed, err: %s", err)
 			}
 			sess, err := sessManager.Get(sessionID)
 			if err != nil {
-				log.Printf("can't get session from database to compare durations, sessID: %d, err: %s", sessionID, err)
+				log.Error(sessCtx, "can't get session from database to compare durations, err: %s", err)
 			} else {
 				newDur := timestamp - sess.Timestamp
 				// Skip if session was ended before with same duration
@@ -156,7 +152,7 @@ func main() {
 				noSessionInDB[sessionID] = timestamp
 				return true, int(NoSessionInDB)
 			}
-			log.Printf("can't save sessionEnd to database, sessID: %d, err: %s", sessionID, err)
+			log.Error(sessCtx, "can't update session duration, err: %s", err)
 			return false, 0
 		}
 		// Check one more time just in case
@@ -167,7 +163,7 @@ func main() {
 			if cfg.UseEncryption {
 				if key := storage.GenerateEncryptionKey(); key != nil {
 					if err := sessManager.UpdateEncryptionKey(sessionID, key); err != nil {
-						log.Printf("can't save session encryption key: %s, session will not be encrypted", err)
+						log.Warn(sessCtx, "can't save session encryption key: %s, session will not be encrypted", err)
 					} else {
 						msg.EncryptionKey = string(key)
 					}
@@ -176,21 +172,21 @@ func main() {
 			if sess != nil && sess.Platform == "ios" {
 				msg := &messages.IOSSessionEnd{Timestamp: timestamp}
 				if err := producer.Produce(cfg.TopicRawIOS, sessionID, msg.Encode()); err != nil {
-					log.Printf("can't send iOSSessionEnd to topic: %s; sessID: %d", err, sessionID)
+					log.Error(sessCtx, "can't send iOSSessionEnd to mobile topic: %s", err)
 					return false, 0
 				}
 				// Inform canvas service about session end
 				if err := producer.Produce(cfg.TopicRawImages, sessionID, msg.Encode()); err != nil {
-					log.Printf("can't send sessionEnd signal to mobile images topic: %s; sessID: %d", err, sessionID)
+					log.Error(sessCtx, "can't send iOSSessionEnd signal to canvas topic: %s", err)
 				}
 			} else {
 				if err := producer.Produce(cfg.TopicRawWeb, sessionID, msg.Encode()); err != nil {
-					log.Printf("can't send sessionEnd to raw topic: %s; sessID: %d", err, sessionID)
+					log.Error(sessCtx, "can't send sessionEnd to raw topic: %s", err)
 					return false, 0
 				}
 				// Inform canvas service about session end
 				if err := producer.Produce(cfg.TopicCanvasImages, sessionID, msg.Encode()); err != nil {
-					log.Printf("can't send sessionEnd signal to canvas topic: %s; sessID: %d", err, sessionID)
+					log.Error(sessCtx, "can't send sessionEnd signal to canvas topic: %s", err)
 				}
 			}
@@ -203,23 +199,22 @@ func main() {
 			return true, int(NewSessionEnd)
 		})
 		if n := len(failedSessionEnds); n > 0 {
-			log.Println("sessions with wrong duration:", n, failedSessionEnds)
+			log.Info(ctx, "sessions with wrong duration: %d, %v", n, failedSessionEnds)
 		}
 		if n := len(negativeDuration); n > 0 {
-			log.Println("sessions with negative duration:", n, negativeDuration)
+			log.Info(ctx, "sessions with negative duration: %d, %v", n, negativeDuration)
 		}
 		if n := len(noSessionInDB); n > 0 {
-			log.Printf("sessions without info in DB: %d, %v", n, noSessionInDB)
+			log.Info(ctx, "sessions without info in DB: %d, %v", n, noSessionInDB)
 		}
-		log.Printf("[INFO] failed: %d, negative: %d, shorter: %d, same: %d, updated: %d, new: %d, not found: %d",
+		log.Info(ctx, "failed: %d, negative: %d, shorter: %d, same: %d, updated: %d, new: %d, not found: %d",
 			len(failedSessionEnds), len(negativeDuration), len(shorterDuration), len(duplicatedSessionEnds),
 			updatedDurations, newSessionEnds, len(noSessionInDB))
 		producer.Flush(cfg.ProducerTimeout)
 		if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
-			log.Printf("can't commit messages with offset: %s", err)
+			log.Error(ctx, "can't commit messages with offset: %s", err)
 		}
 	case msg := <-consumer.Rebalanced():
 		log.Printf("Rebalanced event, type: %s, partitions: %+v", msg.Type, msg.Partitions)
 		if msg.Type == types.RebalanceTypeRevoke {
 			sessionEndGenerator.Disable()
 		} else {
@@ -231,7 +226,7 @@ func main() {
 			continue
 		}
 		if err := consumer.ConsumeNext(); err != nil {
-			log.Fatalf("Error on consuming: %v", err)
+			log.Fatal(ctx, "error on consuming: %s", err)
 		}
 	}
 }

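One pattern in the ender diff above recurs in canvas-handler, sink, image storage, and storage: before logging anything session-specific, the handler derives a session-scoped context, so the session ID travels with every record instead of being formatted into each message by hand. Condensed from the diff (how the logger treats the value is assumed to mirror the sketch near the top):

	// Per-session handler fragment, as in HandleEndedSessions above:
	sessCtx := context.WithValue(context.Background(), "sessionID", fmt.Sprintf("%d", sessionID))
	log.Error(sessCtx, "getSessionDuration failed, err: %s", err)
	// ...every later log call for this session reuses sessCtx.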
@@ -1,7 +1,7 @@
 package main

 import (
-	"log"
+	"context"
 	config "openreplay/backend/internal/config/heuristics"
 	"openreplay/backend/internal/heuristics"
 	"openreplay/backend/pkg/builders"
@@ -9,6 +9,7 @@ import (
 	"openreplay/backend/pkg/handlers/custom"
 	"openreplay/backend/pkg/handlers/ios"
 	"openreplay/backend/pkg/handlers/web"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/memory"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
@@ -18,11 +19,10 @@
 )

 func main() {
-	m := metrics.New()
-	m.Register(heuristicsMetrics.List())
-
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-	cfg := config.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := config.New(log)
+	metrics.New(log, heuristicsMetrics.List())

 	// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
 	handlersFabric := func() []handlers.MessageProcessor {
@@ -40,7 +40,7 @@ func main() {
 		}
 	}

-	eventBuilder := builders.NewBuilderMap(handlersFabric)
+	eventBuilder := builders.NewBuilderMap(log, handlersFabric)
 	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
 	consumer := queue.NewConsumer(
 		cfg.GroupHeuristics,
@@ -48,21 +48,20 @@ func main() {
 		cfg.TopicRawWeb,
 		cfg.TopicRawIOS,
 	},
-	messages.NewMessageIterator(eventBuilder.HandleMessage, nil, true),
+	messages.NewMessageIterator(log, eventBuilder.HandleMessage, nil, true),
 	false,
 	cfg.MessageSizeLimit,
 	)

 	// Init memory manager
-	memoryManager, err := memory.NewManager(cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
+	memoryManager, err := memory.NewManager(log, cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
 	if err != nil {
-		log.Printf("can't init memory manager: %s", err)
-		return
+		log.Fatal(ctx, "can't init memory manager: %s", err)
 	}

 	// Run service and wait for TERM signal
-	service := heuristics.New(cfg, producer, consumer, eventBuilder, memoryManager)
-	log.Printf("Heuristics service started\n")
-	terminator.Wait(service)
-	log.Printf("Heuristics service stopped\n")
+	service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager)
+	log.Info(ctx, "Heuristics service started")
+	terminator.Wait(log, service)
 }

@@ -1,9 +1,7 @@
 package main

 import (
-	"log"
-	"openreplay/backend/pkg/db/postgres/pool"
-	"openreplay/backend/pkg/db/redis"
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
@@ -12,6 +10,9 @@ import (
 	"openreplay/backend/internal/http/router"
 	"openreplay/backend/internal/http/server"
 	"openreplay/backend/internal/http/services"
+	"openreplay/backend/pkg/db/postgres/pool"
+	"openreplay/backend/pkg/db/redis"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/metrics"
 	databaseMetrics "openreplay/backend/pkg/metrics/database"
 	httpMetrics "openreplay/backend/pkg/metrics/http"
@@ -19,64 +20,55 @@
 )

 func main() {
-	m := metrics.New()
-	m.Register(httpMetrics.List())
-	m.Register(databaseMetrics.List())
-
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := http.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := http.New(log)
+	metrics.New(log, append(httpMetrics.List(), databaseMetrics.List()...))

 	// Connect to queue
 	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
 	defer producer.Close(15000)

 	// Init postgres connection
 	pgConn, err := pool.New(cfg.Postgres.String())
 	if err != nil {
-		log.Printf("can't init postgres connection: %s", err)
-		return
+		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close()

 	// Init redis connection
 	redisClient, err := redis.New(&cfg.Redis)
 	if err != nil {
-		log.Printf("can't init redis connection: %s", err)
+		log.Warn(ctx, "can't init redis connection: %s", err)
 	}
 	defer redisClient.Close()

 	// Build all services
-	services, err := services.New(cfg, producer, pgConn, redisClient)
+	services, err := services.New(log, cfg, producer, pgConn, redisClient)
 	if err != nil {
-		log.Fatalf("failed while creating services: %s", err)
+		log.Fatal(ctx, "failed while creating services: %s", err)
 	}

 	// Init server's routes
-	router, err := router.NewRouter(cfg, services)
+	router, err := router.NewRouter(cfg, log, services)
 	if err != nil {
-		log.Fatalf("failed while creating engine: %s", err)
+		log.Fatal(ctx, "failed while creating router: %s", err)
 	}

 	// Init server
 	server, err := server.New(router.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
 	if err != nil {
-		log.Fatalf("failed while creating server: %s", err)
+		log.Fatal(ctx, "failed while creating server: %s", err)
 	}

 	// Run server
 	go func() {
 		if err := server.Start(); err != nil {
-			log.Fatalf("Server error: %v\n", err)
+			log.Fatal(ctx, "http server error: %s", err)
 		}
 	}()
-	log.Printf("Server successfully started on port %v\n", cfg.HTTPPort)
+	log.Info(ctx, "server successfully started on port %s", cfg.HTTPPort)

 	// Wait stop signal to shut down server gracefully
 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
 	<-sigchan
-	log.Printf("Shutting down the server\n")
+	log.Info(ctx, "shutting down the server")
 	server.Stop()
 }

@@ -1,9 +1,8 @@
 package main

 import (
 	"context"
 	"fmt"
-	"log"
-	"openreplay/backend/pkg/objectstorage/store"
 	"os"
 	"os/signal"
 	"strconv"
@@ -12,29 +11,28 @@ import (

 	config "openreplay/backend/internal/config/imagestorage"
 	"openreplay/backend/internal/screenshot-handler"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
 	storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
+	"openreplay/backend/pkg/objectstorage/store"
 	"openreplay/backend/pkg/queue"
 )

 func main() {
-	m := metrics.New()
-	m.Register(storageMetrics.List())
-
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := config.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := config.New(log)
+	metrics.New(log, storageMetrics.List())

 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
-		log.Fatalf("can't init object storage: %s", err)
+		log.Fatal(ctx, "can't init object storage: %s", err)
 	}

-	srv, err := screenshot_handler.New(cfg, objStore)
+	srv, err := screenshot_handler.New(cfg, log, objStore)
 	if err != nil {
-		log.Printf("can't init storage service: %s", err)
-		return
+		log.Fatal(ctx, "can't init storage service: %s", err)
 	}

 	workDir := cfg.FSDir
@@ -45,7 +43,6 @@ func main() {
 		cfg.TopicRawImages,
 	},
 	messages.NewImagesMessageIterator(func(data []byte, sessID uint64) {
-		log.Printf("Received message for session %d\n", sessID)
 		checkSessionEnd := func(data []byte) (messages.Message, error) {
 			reader := messages.NewBytesReader(data)
 			msgType, err := reader.ReadUint()
@@ -61,16 +58,15 @@ func main() {
 		}
 			return msg, nil
 		}
+		sessCtx := context.WithValue(context.Background(), "sessionID", sessID)

 		if _, err := checkSessionEnd(data); err == nil {
 			// Pack all screenshots from mobile session, compress and upload to object storage
-			if err := srv.PackScreenshots(sessID, workDir+"/screenshots/"+strconv.FormatUint(sessID, 10)+"/"); err != nil {
-				log.Printf("upload session err: %s, sessID: %d", err, sessID)
+			if err := srv.PackScreenshots(sessCtx, sessID, workDir+"/screenshots/"+strconv.FormatUint(sessID, 10)+"/"); err != nil {
+				log.Error(sessCtx, "can't pack screenshots: %s", err)
 			}
 		} else {
 			// Unpack new screenshots package from mobile session
-			if err := srv.Process(sessID, data); err != nil {
-				log.Printf("can't process mobile screenshots: %s", err)
+			if err := srv.Process(sessCtx, sessID, data); err != nil {
+				log.Error(sessCtx, "can't process screenshots: %s", err)
 			}
 		}
 	}, nil, true),
@@ -78,7 +74,7 @@ func main() {
 	cfg.MessageSizeLimit,
 	)

-	log.Printf("Image storage service started\n")
+	log.Info(ctx, "Image storage service started")

 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -87,21 +83,21 @@ func main() {
 	for {
 		select {
 		case sig := <-sigchan:
-			log.Printf("Caught signal %v: terminating\n", sig)
+			log.Info(ctx, "Caught signal %v: terminating", sig)
 			srv.Wait()
 			consumer.Close()
 			os.Exit(0)
 		case <-counterTick:
 			srv.Wait()
 			if err := consumer.Commit(); err != nil {
-				log.Printf("can't commit messages: %s", err)
+				log.Error(ctx, "can't commit messages: %s", err)
 			}
 		case msg := <-consumer.Rebalanced():
-			log.Println(msg)
+			log.Info(ctx, "Rebalanced: %v", msg)
 		default:
 			err := consumer.ConsumeNext()
 			if err != nil {
-				log.Fatalf("Error on images consumption: %v", err)
+				log.Fatal(ctx, "Error on images consumption: %v", err)
 			}
 		}
 	}

@@ -2,7 +2,6 @@ package main

 import (
 	"context"
-	"log"
 	"os"
 	"os/signal"
 	"syscall"
@@ -11,6 +10,7 @@ import (

 	config "openreplay/backend/internal/config/integrations"
 	"openreplay/backend/pkg/integrations"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/metrics"
 	databaseMetrics "openreplay/backend/pkg/metrics/database"
 	"openreplay/backend/pkg/queue"
@@ -18,44 +18,42 @@
 )

 func main() {
-	m := metrics.New()
-	m.Register(databaseMetrics.List())
-
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := config.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := config.New(log)
+	metrics.New(log, databaseMetrics.List())

 	pgConn, err := pgx.Connect(context.Background(), cfg.Postgres.String())
 	if err != nil {
-		log.Fatalf("can't init postgres connection: %s", err)
+		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close(context.Background())

 	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
 	defer producer.Close(15000)

-	storage := integrations.NewStorage(pgConn)
+	storage := integrations.NewStorage(pgConn, log)
 	if err := storage.Listen(); err != nil {
-		log.Fatalf("Listener error: %v", err)
+		log.Fatal(ctx, "can't init storage listener: %s", err)
 	}

-	listener, err := integrations.New(cfg, storage, producer, integrations.NewManager(), token.NewTokenizer(cfg.TokenSecret))
+	listener, err := integrations.New(log, cfg, storage, producer, integrations.NewManager(log), token.NewTokenizer(cfg.TokenSecret))
 	if err != nil {
-		log.Fatalf("Listener error: %v", err)
+		log.Fatal(ctx, "can't init service: %s", err)
 	}
 	defer listener.Close()

 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

-	log.Printf("Integration service started\n")
+	log.Info(ctx, "integration service started")
 	for {
 		select {
 		case sig := <-sigchan:
-			log.Printf("Caught signal %v: terminating\n", sig)
+			log.Info(ctx, "caught signal %v: terminating", sig)
 			os.Exit(0)
 		case err := <-listener.Errors:
-			log.Printf("Listener error: %v", err)
+			log.Error(ctx, "listener error: %s", err)
 			os.Exit(0)
 		}
 	}

@@ -2,8 +2,8 @@ package main

 import (
 	"bytes"
+	"context"
 	"encoding/binary"
-	"log"
 	"os"
 	"os/signal"
 	"syscall"
@@ -13,6 +13,7 @@ import (
 	"openreplay/backend/internal/sink/assetscache"
 	"openreplay/backend/internal/sink/sessionwriter"
 	"openreplay/backend/internal/storage"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
 	sinkMetrics "openreplay/backend/pkg/metrics/sink"
@@ -21,22 +22,24 @@
 )

 func main() {
-	m := metrics.New()
-	m.Register(sinkMetrics.List())
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := sink.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := sink.New(log)
+	metrics.New(log, sinkMetrics.List())

 	if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
-		log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
+		log.Fatal(ctx, "%v doesn't exist. %v", cfg.FsDir, err)
 	}

-	writer := sessionwriter.NewWriter(cfg.FsUlimit, cfg.FsDir, cfg.FileBuffer, cfg.SyncTimeout)
+	writer := sessionwriter.NewWriter(log, cfg.FsUlimit, cfg.FsDir, cfg.FileBuffer, cfg.SyncTimeout)

 	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
 	defer producer.Close(cfg.ProducerCloseTimeout)
-	rewriter := assets.NewRewriter(cfg.AssetsOrigin)
-	assetMessageHandler := assetscache.New(cfg, rewriter, producer)
+	rewriter, err := assets.NewRewriter(cfg.AssetsOrigin)
+	if err != nil {
+		log.Fatal(ctx, "can't init rewriter: %s", err)
+	}
+	assetMessageHandler := assetscache.New(log, cfg, rewriter, producer)
 	counter := storage.NewLogCounter()

 	var (
@@ -51,6 +54,7 @@ func main() {
 	devBuffer.Reset()

 	msgHandler := func(msg messages.Message) {
+
 		// Check batchEnd signal (nil message)
 		if msg == nil {
 			// Skip empty buffers
@@ -62,7 +66,8 @@ func main() {

 		// Write buffered batches to the session
 		if err := writer.Write(sessionID, domBuffer.Bytes(), devBuffer.Bytes()); err != nil {
-			log.Printf("writer error: %s", err)
+			sessCtx := context.WithValue(context.Background(), "sessionID", sessionID)
+			log.Error(sessCtx, "writer error: %s", err)
 		}

 		// Prepare buffer for the next batch
@@ -73,16 +78,17 @@
 		}

 		sinkMetrics.IncreaseTotalMessages()
+		sessCtx := context.WithValue(context.Background(), "sessionID", msg.SessionID())

 		// Send SessionEnd trigger to storage service
 		if msg.TypeID() == messages.MsgSessionEnd || msg.TypeID() == messages.MsgIOSSessionEnd {
 			if err := producer.Produce(cfg.TopicTrigger, msg.SessionID(), msg.Encode()); err != nil {
-				log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, msg.SessionID())
+				log.Error(sessCtx, "can't send SessionEnd to trigger topic: %s", err)
 			}
 			// duplicate session end message to mobile trigger topic to build video replay for mobile sessions
 			if msg.TypeID() == messages.MsgIOSSessionEnd {
 				if err := producer.Produce(cfg.TopicMobileTrigger, msg.SessionID(), msg.Encode()); err != nil {
-					log.Printf("can't send iOSSessionEnd to mobile trigger topic: %s; sessID: %d", err, msg.SessionID())
+					log.Error(sessCtx, "can't send iOSSessionEnd to mobile trigger topic: %s", err)
 				}
 			}
 			writer.Close(msg.SessionID())
@@ -97,7 +103,7 @@
 			msg.TypeID() == messages.MsgAdoptedSSInsertRuleURLBased {
 			m := msg.Decode()
 			if m == nil {
-				log.Printf("assets decode err, info: %s", msg.Meta().Batch().Info())
+				log.Error(sessCtx, "assets decode err, info: %s", msg.Meta().Batch().Info())
 				return
 			}
 			msg = assetMessageHandler.ParseAssets(m)
@@ -111,7 +117,7 @@
 		// If message timestamp is empty, use at least ts of session start
 		ts := msg.Meta().Timestamp
 		if ts == 0 {
-			log.Printf("zero ts; sessID: %d, msgType: %d", msg.SessionID(), msg.TypeID())
+			log.Warn(sessCtx, "zero ts in msgType: %d", msg.TypeID())
 		} else {
 			// Log ts of last processed message
 			counter.Update(msg.SessionID(), time.UnixMilli(int64(ts)))
@@ -141,18 +147,18 @@
 		// Write message index
 		n, err = domBuffer.Write(messageIndex)
 		if err != nil {
-			log.Printf("domBuffer index write err: %s", err)
+			log.Error(sessCtx, "domBuffer index write err: %s", err)
 		}
 		if n != len(messageIndex) {
-			log.Printf("domBuffer index not full write: %d/%d", n, len(messageIndex))
+			log.Error(sessCtx, "domBuffer index not full write: %d/%d", n, len(messageIndex))
 		}
 		// Write message body
 		n, err = domBuffer.Write(msg.Encode())
 		if err != nil {
-			log.Printf("domBuffer message write err: %s", err)
+			log.Error(sessCtx, "domBuffer message write err: %s", err)
 		}
 		if n != len(msg.Encode()) {
-			log.Printf("domBuffer message not full write: %d/%d", n, len(messageIndex))
+			log.Error(sessCtx, "domBuffer message not full write: %d/%d", n, len(messageIndex))
 		}
 	}
@@ -161,18 +167,18 @@
 		// Write message index
 		n, err = devBuffer.Write(messageIndex)
 		if err != nil {
-			log.Printf("devBuffer index write err: %s", err)
+			log.Error(sessCtx, "devBuffer index write err: %s", err)
 		}
 		if n != len(messageIndex) {
-			log.Printf("devBuffer index not full write: %d/%d", n, len(messageIndex))
+			log.Error(sessCtx, "devBuffer index not full write: %d/%d", n, len(messageIndex))
 		}
 		// Write message body
 		n, err = devBuffer.Write(msg.Encode())
 		if err != nil {
-			log.Printf("devBuffer message write err: %s", err)
+			log.Error(sessCtx, "devBuffer message write err: %s", err)
 		}
 		if n != len(msg.Encode()) {
-			log.Printf("devBuffer message not full write: %d/%d", n, len(messageIndex))
+			log.Error(sessCtx, "devBuffer message not full write: %d/%d", n, len(messageIndex))
 		}
 	}
@@ -186,11 +192,11 @@
 		cfg.TopicRawWeb,
 		cfg.TopicRawIOS,
 	},
-	messages.NewSinkMessageIterator(msgHandler, nil, false),
+	messages.NewSinkMessageIterator(log, msgHandler, nil, false),
 	false,
 	cfg.MessageSizeLimit,
 	)
-	log.Printf("Sink service started\n")
+	log.Info(ctx, "sink service started")

 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -200,35 +206,35 @@
 	for {
 		select {
 		case sig := <-sigchan:
-			log.Printf("Caught signal %v: terminating\n", sig)
+			log.Info(ctx, "Caught signal %v: terminating", sig)
 			// Sync and stop writer
 			writer.Stop()
 			// Commit and stop consumer
 			if err := consumer.Commit(); err != nil {
-				log.Printf("can't commit messages: %s", err)
+				log.Error(ctx, "can't commit messages: %s", err)
 			}
 			consumer.Close()
 			os.Exit(0)
 		case <-tick:
 			if err := consumer.Commit(); err != nil {
-				log.Printf("can't commit messages: %s", err)
+				log.Error(ctx, "can't commit messages: %s", err)
 			}
 		case <-tickInfo:
-			counter.Print()
-			log.Printf("writer: %s", writer.Info())
+			log.Info(ctx, "%s", counter.Log())
+			log.Info(ctx, "writer: %s", writer.Info())
 		case <-consumer.Rebalanced():
 			s := time.Now()
 			// Commit now to avoid duplicate reads
 			if err := consumer.Commit(); err != nil {
-				log.Printf("can't commit messages: %s", err)
+				log.Error(ctx, "can't commit messages: %s", err)
 			}
 			// Sync all files
 			writer.Sync()
-			log.Printf("manual sync finished, dur: %d", time.Now().Sub(s).Milliseconds())
+			log.Info(ctx, "manual sync finished, dur: %d", time.Now().Sub(s).Milliseconds())
 		default:
 			err := consumer.ConsumeNext()
 			if err != nil {
-				log.Fatalf("Error on consumption: %v", err)
+				log.Fatal(ctx, "error on consumption: %v", err)
 			}
 		}
 	}

@@ -1,7 +1,7 @@
 package main

 import (
-	"log"
+	"context"
 	"os"
 	"os/signal"
 	"syscall"
@@ -10,6 +10,7 @@ import (
 	config "openreplay/backend/internal/config/storage"
 	"openreplay/backend/internal/storage"
 	"openreplay/backend/pkg/failover"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
 	storageMetrics "openreplay/backend/pkg/metrics/storage"
@@ -18,27 +19,24 @@
 )

 func main() {
-	m := metrics.New()
-	m.Register(storageMetrics.List())
-
-	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-
-	cfg := config.New()
+	ctx := context.Background()
+	log := logger.New()
+	cfg := config.New(log)
+	metrics.New(log, storageMetrics.List())

 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
-		log.Fatalf("can't init object storage: %s", err)
+		log.Fatal(ctx, "can't init object storage: %s", err)
 	}
-	srv, err := storage.New(cfg, objStore)
+	srv, err := storage.New(cfg, log, objStore)
 	if err != nil {
-		log.Printf("can't init storage service: %s", err)
-		return
+		log.Fatal(ctx, "can't init storage service: %s", err)
 	}

 	counter := storage.NewLogCounter()
 	sessionFinder, err := failover.NewSessionFinder(cfg, srv)
 	if err != nil {
-		log.Fatalf("can't init sessionFinder module: %s", err)
+		log.Fatal(ctx, "can't init sessionFinder module: %s", err)
 	}

 	consumer := queue.NewConsumer(
@@ -47,6 +45,7 @@ func main() {
 		cfg.TopicTrigger,
 	},
 	messages.NewMessageIterator(
+		log,
 		func(msg messages.Message) {
 			// Convert IOSSessionEnd to SessionEnd
 			if msg.TypeID() == messages.MsgIOSSessionEnd {
@@ -56,10 +55,11 @@ func main() {
 			}
 			msg.Meta().SetMeta(oldMeta)
 		}
+			sessCtx := context.WithValue(context.Background(), "sessionID", msg.SessionID())
 			// Process session to save mob files to s3
 			sesEnd := msg.(*messages.SessionEnd)
-			if err := srv.Process(sesEnd); err != nil {
-				log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID())
+			if err := srv.Process(sessCtx, sesEnd); err != nil {
+				log.Error(sessCtx, "process session err: %s", err)
 				sessionFinder.Find(msg.SessionID(), sesEnd.Timestamp)
 			}
 			// Log timestamp of last processed session
@@ -72,7 +72,7 @@ func main() {
 	cfg.MessageSizeLimit,
 	)

-	log.Printf("Storage service started\n")
+	log.Info(ctx, "Storage service started")

 	sigchan := make(chan os.Signal, 1)
 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -81,23 +81,23 @@
 	for {
 		select {
 		case sig := <-sigchan:
-			log.Printf("Caught signal %v: terminating\n", sig)
+			log.Info(ctx, "caught signal %v: terminating", sig)
 			sessionFinder.Stop()
 			srv.Wait()
 			consumer.Close()
 			os.Exit(0)
 		case <-counterTick:
-			go counter.Print()
+			go log.Info(ctx, "%s", counter.Log())
 			srv.Wait()
 			if err := consumer.Commit(); err != nil {
-				log.Printf("can't commit messages: %s", err)
+				log.Error(ctx, "can't commit messages: %s", err)
 			}
 		case msg := <-consumer.Rebalanced():
-			log.Println(msg)
+			log.Info(ctx, "rebalanced: %v", msg)
 		default:
 			err := consumer.ConsumeNext()
 			if err != nil {
-				log.Fatalf("Error on consumption: %v", err)
+				log.Fatal(ctx, "error on consumption: %v", err)
 			}
 		}
 	}

@@ -4,6 +4,7 @@ go 1.20

 require (
+	cloud.google.com/go/logging v1.7.0
 	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0
 	github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
 	github.com/ClickHouse/clickhouse-go/v2 v2.2.0
 	github.com/Masterminds/semver v1.5.0
@@ -11,6 +12,7 @@ require (
 	github.com/aws/aws-sdk-go v1.44.98
 	github.com/btcsuite/btcutil v1.0.2
 	github.com/confluentinc/confluent-kafka-go/v2 v2.2.0
+	github.com/docker/distribution v2.8.1+incompatible
 	github.com/elastic/go-elasticsearch/v7 v7.13.1
 	github.com/go-redis/redis v6.15.9+incompatible
 	github.com/google/uuid v1.3.0
@@ -28,6 +30,7 @@ require (
 	github.com/sethvargo/go-envconfig v0.7.0
 	github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
 	github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
+	go.uber.org/zap v1.17.0
 	golang.org/x/net v0.17.0
 	google.golang.org/api v0.126.0
 )
@@ -38,7 +41,6 @@ require (
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	cloud.google.com/go/iam v1.1.1 // indirect
 	cloud.google.com/go/longrunning v0.5.1 // indirect
-	github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 // indirect
 	github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
@@ -63,9 +65,12 @@ require (
 	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/shopspring/decimal v1.3.1 // indirect
+	github.com/sirupsen/logrus v1.8.1 // indirect
 	go.opencensus.io v0.24.0 // indirect
 	go.opentelemetry.io/otel v1.7.0 // indirect
 	go.opentelemetry.io/otel/trace v1.7.0 // indirect
+	go.uber.org/atomic v1.7.0 // indirect
+	go.uber.org/multierr v1.6.0 // indirect
 	golang.org/x/crypto v0.14.0 // indirect
 	golang.org/x/oauth2 v0.10.0 // indirect
 	golang.org/x/sync v0.3.0 // indirect

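The go.mod changes line up with the new logger: go.uber.org/zap v1.17.0 becomes a direct dependency (its companions go.uber.org/atomic and go.uber.org/multierr pick up checksums in go.sum below), and cloud.google.com/go/logging v1.7.0 is added, presumably for shipping structured logs to a cloud backend. With zap's production preset, each record is a single JSON object along these lines (illustrative only; the exact field set depends on the encoder configuration):

	{"level":"info","ts":1700000000.123,"caller":"ender/main.go:88","msg":"Ender service started","sessionID":"123"}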
@@ -1773,13 +1773,16 @@ go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg
 go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
 go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U=
 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=

@@ -5,7 +5,6 @@ import (
 	"crypto/x509"
 	"fmt"
 	"io"
-	"log"
 	"mime"
 	"net/http"
 	"os"
@@ -46,7 +45,10 @@ func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {
 		return nil, errors.New("object storage is nil")
 	}

-	rewriter := assets.NewRewriter(cfg.AssetsOrigin)
+	rewriter, err := assets.NewRewriter(cfg.AssetsOrigin)
+	if err != nil {
+		return nil, errors.Wrap(err, "can't create rewriter")
+	}

 	tlsConfig := &tls.Config{
 		InsecureSkipVerify: true,
@@ -59,12 +61,13 @@ func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {

 	cert, err = tls.LoadX509KeyPair(cfg.ClientCertFilePath, cfg.ClientKeyFilePath)
 	if err != nil {
-		log.Fatalf("Error creating x509 keypair from the client cert file %s and client key file %s , Error: %s", err, cfg.ClientCertFilePath, cfg.ClientKeyFilePath)
+		return nil, fmt.Errorf("error creating x509 keypair from the client cert file %s and client key file %s , Error: %s",
+			err, cfg.ClientCertFilePath, cfg.ClientKeyFilePath)
 	}

 	caCert, err := os.ReadFile(cfg.CaCertFilePath)
 	if err != nil {
-		log.Fatalf("Error opening cert file %s, Error: %s", cfg.CaCertFilePath, err)
+		return nil, fmt.Errorf("error opening cert file %s, Error: %s", cfg.CaCertFilePath, err)
 	}
 	caCertPool := x509.NewCertPool()
 	caCertPool.AppendCertsFromPEM(caCert)

@@ -1,7 +1,6 @@
 package cacher

 import (
-	"log"
 	"sync"
 )

@@ -71,10 +70,8 @@ func (p *WorkerPool) AddTask(task *Task) {
 }

 func (p *WorkerPool) Stop() {
-	log.Printf("stopping workers")
 	p.term.Do(func() {
 		close(p.done)
 	})
 	p.wg.Wait()
-	log.Printf("all workers have been stopped")
 }

@ -2,12 +2,10 @@ package canvas_handler
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"openreplay/backend/pkg/objectstorage"
|
||||
"openreplay/backend/pkg/pool"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
|
|
@ -15,28 +13,34 @@ import (
|
|||
"time"
|
||||
|
||||
config "openreplay/backend/internal/config/canvas-handler"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/objectstorage"
|
||||
"openreplay/backend/pkg/pool"
|
||||
)
|
||||
|
||||
type saveTask struct {
|
||||
sessionID uint64
|
||||
name string
|
||||
image *bytes.Buffer
|
||||
}
|
||||
|
||||
type uploadTask struct {
|
||||
path string
|
||||
name string
|
||||
}
|
||||
|
||||
type ImageStorage struct {
|
||||
cfg *config.Config
|
||||
log logger.Logger
|
||||
basePath string
|
||||
saverPool pool.WorkerPool
|
||||
uploaderPool pool.WorkerPool
|
||||
objStorage objectstorage.ObjectStorage
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
|
||||
type saveTask struct {
|
||||
ctx context.Context
|
||||
sessionID uint64
|
||||
name string
|
||||
image *bytes.Buffer
|
||||
}
|
||||
|
||||
type uploadTask struct {
|
||||
ctx context.Context
|
||||
path string
|
||||
name string
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
|
||||
switch {
|
||||
case cfg == nil:
|
||||
return nil, fmt.Errorf("config is empty")
|
||||
|
|
@ -47,6 +51,7 @@ func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*ImageStor
|
|||
}
|
||||
s := &ImageStorage{
|
||||
cfg: cfg,
|
||||
log: log,
|
||||
basePath: path,
|
||||
objStorage: objStorage,
|
||||
}
|
||||
|
|
@ -57,23 +62,46 @@ func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*ImageStor
|
|||
|
||||
func (v *ImageStorage) Wait() {
|
||||
v.saverPool.Pause()
|
||||
v.uploaderPool.Pause()
|
||||
}
|
||||
|
||||
func (v *ImageStorage) sendToS3(payload interface{}) {
|
||||
task := payload.(*uploadTask)
|
||||
start := time.Now()
|
||||
video, err := os.ReadFile(task.path)
|
||||
func (v *ImageStorage) SaveCanvasToDisk(ctx context.Context, sessID uint64, data []byte) error {
|
||||
type canvasData struct {
|
||||
Name string
|
||||
Data []byte
|
||||
}
|
||||
var msg = &canvasData{}
|
||||
if err := json.Unmarshal(data, msg); err != nil {
|
||||
return fmt.Errorf("can't parse canvas message, err: %s", err)
|
||||
}
|
||||
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, name: msg.Name, image: bytes.NewBuffer(msg.Data)})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *ImageStorage) writeToDisk(payload interface{}) {
|
||||
task := payload.(*saveTask)
|
||||
path := fmt.Sprintf("%s/%d/", v.basePath, task.sessionID)
|
||||
|
||||
// Ensure the directory exists
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
v.log.Fatal(task.ctx, "can't create a dir, err: %s", err)
|
||||
}
|
||||
|
||||
// Write images to disk
|
||||
outFile, err := os.Create(path + task.name)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read video file: %v", err)
|
||||
v.log.Fatal(task.ctx, "can't create an image: %s", err)
|
||||
}
|
||||
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.Zstd); err != nil {
|
||||
log.Fatalf("Storage: start uploading replay failed. %s", err)
|
||||
if _, err := io.Copy(outFile, task.image); err != nil {
|
||||
v.log.Fatal(task.ctx, "can't copy data to image: %s", err)
|
||||
}
|
||||
log.Printf("Replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
|
||||
outFile.Close()
|
||||
|
||||
v.log.Info(task.ctx, "canvas image saved, name: %s, size: %3.3f mb", task.name, float64(task.image.Len())/1024.0/1024.0)
|
||||
return
|
||||
}
|
||||
|
||||
func (v *ImageStorage) PackSessionCanvases(sessID uint64) error {
|
||||
func (v *ImageStorage) PackSessionCanvases(ctx context.Context, sessID uint64) error {
|
||||
path := fmt.Sprintf("%s/%d/", v.basePath, sessID)
|
||||
|
||||
// Check that the directory exists
|
||||
|
|
@ -92,7 +120,7 @@ func (v *ImageStorage) PackSessionCanvases(sessID uint64) error {
|
|||
name := strings.Split(file.Name(), ".")
|
||||
parts := strings.Split(name[0], "_")
|
||||
if len(name) != 2 || len(parts) != 3 {
|
||||
log.Printf("unknown file name: %s, skipping", file.Name())
|
||||
v.log.Warn(ctx, "unknown file name: %s, skipping", file.Name())
|
||||
continue
|
||||
}
|
||||
canvasID := fmt.Sprintf("%s_%s", parts[0], parts[1])
|
||||
|
|
@ -100,13 +128,11 @@ func (v *ImageStorage) PackSessionCanvases(sessID uint64) error {
|
|||
}
|
||||
|
||||
sessionID := strconv.FormatUint(sessID, 10)
|
||||
//
|
||||
for name := range names {
|
||||
// Save to archives
|
||||
archPath := fmt.Sprintf("%s%s.tar.zst", path, name)
|
||||
fullCmd := fmt.Sprintf("find %s -type f -name '%s*' | tar -cf - --files-from=- | zstd -o %s",
|
||||
path, name, archPath)
|
||||
log.Printf("Executing command: %s", fullCmd)
|
||||
cmd := exec.Command("sh", "-c", fullCmd)
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
|
|
@@ -114,48 +140,23 @@ func (v *ImageStorage) PackSessionCanvases(sessID uint64) error {
 
 		err := cmd.Run()
 		if err != nil {
-			log.Printf("Failed to execute command: %v, stderr: %v", err, stderr.String())
-			return err
+			return fmt.Errorf("failed to execute command, err: %s, stderr: %v", err, stderr.String())
 		}
 
-		v.uploaderPool.Submit(&uploadTask{path: archPath, name: sessionID + "/" + name + ".tar.zst"})
+		v.uploaderPool.Submit(&uploadTask{ctx: ctx, path: archPath, name: sessionID + "/" + name + ".tar.zst"})
 	}
 	return nil
 }
 
-func (v *ImageStorage) SaveCanvasToDisk(sessID uint64, data []byte) error {
-	type canvasData struct {
-		Name string
-		Data []byte
-	}
-	var msg = &canvasData{}
-	if err := json.Unmarshal(data, msg); err != nil {
-		log.Printf("can't parse canvas message, err: %s", err)
-	}
-	// Use the same workflow
-	v.saverPool.Submit(&saveTask{sessionID: sessID, name: msg.Name, image: bytes.NewBuffer(msg.Data)})
-	return nil
-}
-
-func (v *ImageStorage) writeToDisk(payload interface{}) {
-	task := payload.(*saveTask)
-	path := fmt.Sprintf("%s/%d/", v.basePath, task.sessionID)
-
-	// Ensure the directory exists
-	if err := os.MkdirAll(path, 0755); err != nil {
-		log.Fatalf("Error creating directories: %v", err)
-	}
-
-	// Write images to disk
-	outFile, err := os.Create(path + task.name) // or open file in rewrite mode
+func (v *ImageStorage) sendToS3(payload interface{}) {
+	task := payload.(*uploadTask)
+	start := time.Now()
+	video, err := os.ReadFile(task.path)
 	if err != nil {
-		log.Printf("can't create file: %s", err.Error())
+		v.log.Fatal(task.ctx, "failed to read canvas archive: %s", err)
 	}
-	if _, err := io.Copy(outFile, task.image); err != nil {
-		log.Printf("can't copy file: %s", err.Error())
+	if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.Zstd); err != nil {
+		v.log.Fatal(task.ctx, "failed to upload canvas to storage: %s", err)
 	}
-	outFile.Close()
-
-	log.Printf("new canvas image, sessID: %d, name: %s, size: %3.3f mb", task.sessionID, task.name, float64(task.image.Len())/1024.0/1024.0)
+	v.log.Info(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
 	return
 }
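For reference, a minimal, self-contained sketch of the `find | tar | zstd` pipeline that `PackSessionCanvases` shells out to (directory and archive paths here are invented for the example). One design caveat worth noting: with plain `sh -c`, a pipeline's exit status is that of its last command, so a failing `find` or `tar` only surfaces through `zstd`'s stderr or an empty archive.

```go
package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// packFiles archives every file under dir whose name starts with prefix into
// a zstd-compressed tarball, mirroring the pipeline used above.
func packFiles(dir, prefix, archPath string) error {
	fullCmd := fmt.Sprintf("find %s -type f -name '%s*' | tar -cf - --files-from=- | zstd -o %s",
		dir, prefix, archPath)
	cmd := exec.Command("sh", "-c", fullCmd)

	var stderr bytes.Buffer
	cmd.Stderr = &stderr // keep stderr so a failure is diagnosable

	if err := cmd.Run(); err != nil {
		// Surface both the exit error and whatever the pipeline printed.
		return fmt.Errorf("failed to execute command, err: %s, stderr: %v", err, stderr.String())
	}
	return nil
}

func main() {
	// Hypothetical paths, only for demonstration.
	if err := packFiles("/tmp/canvases", "1718_", "/tmp/canvases/1718.tar.zst"); err != nil {
		fmt.Println(err)
	}
}
```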
@@ -4,6 +4,7 @@ import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
 	"openreplay/backend/internal/config/objectstorage"
+	"openreplay/backend/pkg/logger"
 )
 
 type Config struct {
@@ -20,8 +21,8 @@ type Config struct {
 	ClientCertFilePath string `env:"CLIENT_CERT_FILE_PATH"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
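Every service config below gets the same treatment: `New()` becomes `New(log logger.Logger)` and the logger is threaded into `configurator.Process`. A minimal sketch of the constructor-injection pattern, using a stand-in `Logger` interface rather than the real `pkg/logger` API:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
)

// Logger mimics the shape of the project's logger interface (hypothetical
// here): every method takes a context so request-scoped fields can ride along.
type Logger interface {
	Info(ctx context.Context, format string, args ...interface{})
}

type jsonLogger struct{}

func (jsonLogger) Info(ctx context.Context, format string, args ...interface{}) {
	entry := map[string]string{"level": "info", "msg": fmt.Sprintf(format, args...)}
	if sid, ok := ctx.Value("sessionID").(string); ok {
		entry["sessionID"] = sid
	}
	_ = json.NewEncoder(os.Stdout).Encode(entry)
}

type Config struct {
	TopicCache string
}

// New takes the logger as an explicit dependency instead of relying on the
// global "log" package, so config-time messages come out as structured JSON
// just like everything else the service emits.
func New(log Logger) *Config {
	cfg := &Config{TopicCache: "cache"}
	log.Info(context.Background(), "config loaded: %+v", cfg)
	return cfg
}

func main() {
	_ = New(jsonLogger{})
}
```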
@@ -4,6 +4,7 @@ import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
 	"openreplay/backend/internal/config/objectstorage"
+	"openreplay/backend/pkg/logger"
 )
 
 type Config struct {
@@ -17,8 +18,8 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"os"
 	"reflect"
 	"strconv"
@@ -13,7 +12,9 @@ import (
 	"time"
 
 	"github.com/sethvargo/go-envconfig"
+
 	"openreplay/backend/internal/config/common"
+	"openreplay/backend/pkg/logger"
 )
 
 func readFile(path string) (map[string]string, error) {
@@ -26,7 +27,7 @@ func readFile(path string) (map[string]string, error) {
 	if err != nil {
 		return nil, fmt.Errorf("can't read file: %s", err)
 	}
-	log.Println(data)
 
 	res := make(map[string]string)
 	lines := strings.Split(string(data), "\n")
 	for _, line := range lines {
@@ -42,14 +43,15 @@ func readFile(path string) (map[string]string, error) {
 	return res, nil
 }
 
-func parseFile(a interface{}, path string) {
+func parseFile(log logger.Logger, a interface{}, path string) {
+	ctx := context.Background()
 	// Skip parsing process without logs if we don't have path to config file
 	if path == "" {
 		return
 	}
 	envs, err := readFile(path)
 	if err != nil {
-		log.Printf("can't parse config file: %s", err)
+		log.Error(ctx, "can't read config file, err: %s", err)
 		return
 	}
 
@@ -71,48 +73,48 @@ func parseFile(a interface{}, path string) {
 		case "int", "int8", "int16", "int32", "int64":
 			intValue, err := strconv.Atoi(value)
 			if err != nil {
-				log.Printf("can't parse int value: %s", err)
+				log.Error(ctx, "can't parse int value: %s", err)
 				continue
 			}
 			val.Field(i).SetInt(int64(intValue))
 		case "uint", "uint8", "uint16", "uint32", "uint64":
 			uintValue, err := strconv.Atoi(value)
 			if err != nil {
-				log.Printf("can't parse uint value: %s", err)
+				log.Error(ctx, "can't parse uint value: %s", err)
 				continue
 			}
 			val.Field(i).SetUint(uint64(uintValue))
 		case "bool":
 			boolValue, err := strconv.ParseBool(value)
 			if err != nil {
-				log.Printf("can't parse bool value: %s", err)
+				log.Error(ctx, "can't parse bool value: %s", err)
 				continue
 			}
 			val.Field(i).SetBool(boolValue)
 		case "time.Duration":
 			d, err := time.ParseDuration(value)
 			if err != nil {
-				log.Printf("can't parse time.Duration value: %s", err)
+				log.Error(ctx, "can't parse time.Duration value: %s", err)
 				continue
 			}
 			val.Field(i).SetInt(int64(d))
 		case "map[string]string":
 			var stringMap map[string]string
 			if err := json.Unmarshal([]byte(value), &stringMap); err != nil {
-				log.Printf("can't parse map[string]string value: %s", err)
+				log.Error(ctx, "can't parse map[string]string value: %s", err)
 				continue
 			}
 			val.Field(i).Set(reflect.ValueOf(stringMap))
 		default:
-			log.Println("unknown config type: ", val.Type().Field(i).Type.String())
+			log.Error(ctx, "unknown config type: %s", val.Type().Field(i).Type.String())
 		}
 		}
 	}
 }
 
-func Process(cfg common.Configer) {
+func Process(log logger.Logger, cfg common.Configer) {
 	if err := envconfig.Process(context.Background(), cfg); err != nil {
-		log.Fatalf("error while processing env vars: %s", err)
+		log.Fatal(context.Background(), "error while processing env vars, err: %s", err)
 	}
-	parseFile(cfg, cfg.GetConfigPath())
+	parseFile(log, cfg, cfg.GetConfigPath())
 }
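A compact, runnable sketch of the reflect-based override that `parseFile` performs: values read from the config file are assigned onto struct fields by matching the `env` tag. This is simplified to three field kinds and it strips tag options such as `,required` or `,default=...`, which the real configurator handles through envconfig.

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// applyOverrides copies string values from a parsed config file onto struct
// fields whose `env` tag matches the key.
func applyOverrides(cfg interface{}, envs map[string]string) {
	val := reflect.ValueOf(cfg).Elem()
	for i := 0; i < val.NumField(); i++ {
		// Take only the variable name, dropping options after the comma.
		tag := strings.Split(val.Type().Field(i).Tag.Get("env"), ",")[0]
		value, ok := envs[tag]
		if !ok {
			continue
		}
		switch val.Field(i).Kind() {
		case reflect.String:
			val.Field(i).SetString(value)
		case reflect.Int:
			if n, err := strconv.Atoi(value); err == nil {
				val.Field(i).SetInt(int64(n))
			}
		case reflect.Bool:
			if b, err := strconv.ParseBool(value); err == nil {
				val.Field(i).SetBool(b)
			}
		}
	}
}

type demoConfig struct {
	FSDir       string `env:"FS_DIR,required"`
	Partitions  int    `env:"PARTITIONS_NUMBER,default=16"`
	UseProfiler bool   `env:"PROFILER_ENABLED,default=false"`
}

func main() {
	cfg := &demoConfig{}
	applyOverrides(cfg, map[string]string{
		"FS_DIR":            "/mnt/efs",
		"PARTITIONS_NUMBER": "24",
		"PROFILER_ENABLED":  "true",
	})
	fmt.Printf("%+v\n", cfg) // &{FSDir:/mnt/efs Partitions:24 UseProfiler:true}
}
```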
@@ -5,6 +5,7 @@ import (
 	"openreplay/backend/internal/config/configurator"
 	"openreplay/backend/internal/config/objectstorage"
 	"openreplay/backend/internal/config/redis"
+	"openreplay/backend/pkg/logger"
 	"time"
 )
 
@@ -26,8 +27,8 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -4,6 +4,7 @@ import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
 	"openreplay/backend/internal/config/redis"
+	"openreplay/backend/pkg/logger"
 	"time"
 )
 
@@ -25,8 +26,8 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -4,6 +4,7 @@ import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
 	"openreplay/backend/internal/config/redis"
+	"openreplay/backend/pkg/logger"
 	"time"
 )
 
@@ -24,8 +25,8 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -3,6 +3,7 @@ package heuristics
 import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
+	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/pprof"
 )
 
@@ -17,9 +18,9 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	if cfg.UseProfiler {
 		pprof.StartProfilingServer()
 	}
@@ -6,6 +6,7 @@ import (
 	"openreplay/backend/internal/config/objectstorage"
 	"openreplay/backend/internal/config/redis"
 	"openreplay/backend/pkg/env"
+	"openreplay/backend/pkg/logger"
 	"time"
 )
 
@@ -39,8 +40,8 @@ type Config struct {
 	WorkerID uint16
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{WorkerID: env.WorkerID()}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -4,6 +4,7 @@ import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
 	"openreplay/backend/internal/config/objectstorage"
+	"openreplay/backend/pkg/logger"
 )
 
 type Config struct {
@@ -16,8 +17,8 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -3,6 +3,7 @@ package integrations
 import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
+	"openreplay/backend/pkg/logger"
 )
 
 type Config struct {
@@ -13,8 +14,8 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -3,6 +3,7 @@ package sink
 import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
+	"openreplay/backend/pkg/logger"
 )
 
 type Config struct {
@@ -26,8 +27,8 @@ type Config struct {
 	UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -4,6 +4,7 @@ import (
 	"openreplay/backend/internal/config/common"
 	"openreplay/backend/internal/config/configurator"
 	"openreplay/backend/internal/config/objectstorage"
+	"openreplay/backend/pkg/logger"
 	"time"
 )
 
@@ -26,8 +27,8 @@ type Config struct {
 	CompressionAlgo string `env:"COMPRESSION_ALGO,default=gzip"` // none, gzip, brotli, zstd
 }
 
-func New() *Config {
+func New(log logger.Logger) *Config {
 	cfg := &Config{}
-	configurator.Process(cfg)
+	configurator.Process(log, cfg)
 	return cfg
 }
@@ -1,21 +0,0 @@
-package videostorage
-
-import (
-	"openreplay/backend/internal/config/common"
-	"openreplay/backend/internal/config/configurator"
-	"openreplay/backend/internal/config/objectstorage"
-)
-
-type Config struct {
-	common.Config
-	objectstorage.ObjectsConfig
-	FSDir              string `env:"FS_DIR,required"`
-	GroupCanvasVideo   string `env:"GROUP_CANVAS_VIDEO,required"`
-	TopicCanvasTrigger string `env:"TOPIC_CANVAS_TRIGGER,required"`
-}
-
-func New() *Config {
-	cfg := &Config{}
-	configurator.Process(cfg)
-	return cfg
-}
@@ -1,16 +1,17 @@
 package datasaver
 
 import (
-	"log"
-	"openreplay/backend/pkg/tags"
+	"context"
 
 	"openreplay/backend/internal/config/db"
 	"openreplay/backend/pkg/db/clickhouse"
 	"openreplay/backend/pkg/db/postgres"
 	"openreplay/backend/pkg/db/types"
+	"openreplay/backend/pkg/logger"
 	. "openreplay/backend/pkg/messages"
 	queue "openreplay/backend/pkg/queue/types"
 	"openreplay/backend/pkg/sessions"
+	"openreplay/backend/pkg/tags"
 )
 
 type Saver interface {
@@ -20,6 +21,7 @@ type Saver interface {
 }
 
 type saverImpl struct {
+	log      logger.Logger
 	cfg      *db.Config
 	pg       *postgres.Conn
 	sessions sessions.Sessions
@@ -28,8 +30,9 @@ type saverImpl struct {
 	tags     tags.Tags
 }
 
-func New(cfg *db.Config, pg *postgres.Conn, session sessions.Sessions, tags tags.Tags) Saver {
+func New(log logger.Logger, cfg *db.Config, pg *postgres.Conn, session sessions.Sessions, tags tags.Tags) Saver {
 	s := &saverImpl{
+		log:      log,
 		cfg:      cfg,
 		pg:       pg,
 		sessions: session,
@@ -40,6 +43,7 @@ func New(cfg *db.Config, pg *postgres.Conn, session sessions.Sessions, tags tags
 }
 
 func (s *saverImpl) Handle(msg Message) {
+	sessCtx := context.WithValue(context.Background(), "sessionID", msg.SessionID())
 	if msg.TypeID() == MsgCustomEvent {
 		defer s.Handle(types.WrapCustomEvent(msg.(*CustomEvent)))
 	}
@@ -47,7 +51,7 @@ func (s *saverImpl) Handle(msg Message) {
 		// Handle iOS messages
 		if err := s.handleMobileMessage(msg); err != nil {
 			if !postgres.IsPkeyViolation(err) {
-				log.Printf("iOS Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
+				s.log.Error(sessCtx, "mobile message insertion error, msg: %+v, err: %s", msg, err)
 			}
 			return
 		}
@@ -55,14 +59,14 @@ func (s *saverImpl) Handle(msg Message) {
 		// Handle Web messages
 		if err := s.handleMessage(msg); err != nil {
 			if !postgres.IsPkeyViolation(err) {
-				log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
+				s.log.Error(sessCtx, "web message insertion error, msg: %+v, err: %s", msg, err)
 			}
 			return
 		}
 	}
 
 	if err := s.handleExtraMessage(msg); err != nil {
-		log.Printf("Stats Insertion Error %v; Session: %d, Message: %v", err, msg.SessionID(), msg)
+		s.log.Error(sessCtx, "extra message insertion error, msg: %+v, err: %s", msg, err)
 	}
 	return
 }
@@ -73,10 +77,6 @@ func (s *saverImpl) handleMobileMessage(msg Message) error {
 		return err
 	}
 	switch m := msg.(type) {
-	case *IOSSessionStart:
-		return s.pg.InsertIOSSessionStart(m.SessionID(), m)
-	case *IOSSessionEnd:
-		return s.pg.InsertIOSSessionEnd(m.SessionID(), m)
 	case *IOSUserID:
 		if err = s.sessions.UpdateUserID(session.SessionID, m.ID); err != nil {
 			return err
@@ -124,6 +124,7 @@ func (s *saverImpl) handleMessage(msg Message) error {
 	if err != nil {
 		return err
 	}
+	sessCtx := context.WithValue(context.Background(), "sessionID", msg.SessionID())
 	switch m := msg.(type) {
 	case *SessionStart:
 		return s.pg.HandleStartEvent(m)
@@ -185,7 +186,11 @@ func (s *saverImpl) handleMessage(msg Message) error {
 	case *GraphQL:
 		return s.pg.InsertWebGraphQL(session, m)
 	case *JSException:
-		if err = s.pg.InsertWebErrorEvent(session, types.WrapJSException(m)); err != nil {
+		wrapper, err := types.WrapJSException(m)
+		if err != nil {
+			s.log.Warn(sessCtx, "error on wrapping JSException: %v", err)
+		}
+		if err = s.pg.InsertWebErrorEvent(session, wrapper); err != nil {
 			return err
 		}
 		return s.sessions.UpdateIssuesStats(session.SessionID, 1, 1000)
@@ -226,12 +231,12 @@ func (s *saverImpl) Commit() error {
 func (s *saverImpl) Close() error {
 	if s.pg != nil {
 		if err := s.pg.Close(); err != nil {
-			log.Printf("pg.Close error: %s", err)
+			s.log.Error(context.Background(), "pg.Close error: %s", err)
 		}
 	}
 	if s.ch != nil {
 		if err := s.ch.Stop(); err != nil {
-			log.Printf("ch.Close error: %s", err)
+			s.log.Error(context.Background(), "ch.Close error: %s", err)
 		}
 	}
 	return nil
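The `sessCtx` lines above are what let the JSON logger attach the session ID to every entry it emits. A self-contained sketch of the mechanism, using a typed context key; the diff itself uses the bare string "sessionID", which `go vet` flags but which behaves the same way:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
)

// sessionKey avoids key collisions between packages sharing a context.
type sessionKey struct{}

// logError is a stand-in for the structured logger: it reads the session ID
// back out of the context and emits it as a JSON field alongside the message.
func logError(ctx context.Context, format string, args ...interface{}) {
	entry := map[string]interface{}{"level": "error", "msg": fmt.Sprintf(format, args...)}
	if sid, ok := ctx.Value(sessionKey{}).(uint64); ok {
		entry["sessionID"] = sid
	}
	_ = json.NewEncoder(os.Stdout).Encode(entry)
}

func main() {
	sessCtx := context.WithValue(context.Background(), sessionKey{}, uint64(42))
	logError(sessCtx, "web message insertion error, err: %s", "duplicate key")
	// {"level":"error","msg":"web message insertion error, err: duplicate key","sessionID":42}
}
```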
@@ -1,19 +1,22 @@
 package db
 
 import (
-	"log"
-	"openreplay/backend/pkg/memory"
-	"openreplay/backend/pkg/sessions"
+	"context"
 	"time"
 
 	"openreplay/backend/internal/config/db"
 	"openreplay/backend/internal/db/datasaver"
 	"openreplay/backend/internal/service"
+	"openreplay/backend/pkg/logger"
+	"openreplay/backend/pkg/memory"
 	"openreplay/backend/pkg/queue/types"
+	"openreplay/backend/pkg/sessions"
 )
 
 type dbImpl struct {
+	log      logger.Logger
 	cfg      *db.Config
+	ctx      context.Context
 	consumer types.Consumer
 	saver    datasaver.Saver
 	mm       memory.Manager
@@ -22,9 +25,11 @@ type dbImpl struct {
 	finished chan struct{}
 }
 
-func New(cfg *db.Config, consumer types.Consumer, saver datasaver.Saver, mm memory.Manager, sessions sessions.Sessions) service.Interface {
+func New(log logger.Logger, cfg *db.Config, consumer types.Consumer, saver datasaver.Saver, mm memory.Manager, sessions sessions.Sessions) service.Interface {
 	s := &dbImpl{
+		log:      log,
 		cfg:      cfg,
+		ctx:      context.Background(),
 		consumer: consumer,
 		saver:    saver,
 		mm:       mm,
@@ -46,11 +51,11 @@ func (d *dbImpl) run() {
 		case <-commitTick:
 			d.commit()
 		case msg := <-d.consumer.Rebalanced():
-			log.Println(msg)
+			d.log.Info(d.ctx, "Rebalanced: %v", msg)
 		case <-d.done:
 			d.commit()
 			if err := d.saver.Close(); err != nil {
-				log.Printf("saver.Close error: %s", err)
+				d.log.Error(d.ctx, "saver.Close error: %s", err)
 			}
 			d.consumer.Close()
 			d.finished <- struct{}{}
@@ -59,7 +64,7 @@ func (d *dbImpl) run() {
 			continue
 		}
 		if err := d.consumer.ConsumeNext(); err != nil {
-			log.Fatalf("Error on consumption: %v", err)
+			d.log.Fatal(d.ctx, "Error on consumption: %v", err)
 		}
 	}
 }
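Both the db and heuristics services follow the same lifecycle: a `select` loop plus a `done`/`finished` channel pair so that `Stop()` blocks until cleanup has actually run. A minimal runnable sketch of that handshake (names and tick interval invented for the example):

```go
package main

import (
	"fmt"
	"time"
)

// service demonstrates the done/finished pattern: Stop() signals the loop,
// then waits until the loop has finished closing its resources.
type service struct {
	done     chan struct{}
	finished chan struct{}
}

func newService() *service {
	s := &service{done: make(chan struct{}), finished: make(chan struct{})}
	go s.run()
	return s
}

func (s *service) run() {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			fmt.Println("commit")
		case <-s.done:
			fmt.Println("cleanup: flush saver, close consumer")
			s.finished <- struct{}{}
			return
		}
	}
}

func (s *service) Stop() {
	s.done <- struct{}{}
	<-s.finished // guarantees cleanup completed before Stop returns
}

func main() {
	s := newService()
	time.Sleep(250 * time.Millisecond)
	s.Stop()
}
```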
@@ -1,20 +1,23 @@
 package heuristics
 
 import (
+	"context"
 	"fmt"
-	"log"
-	"openreplay/backend/pkg/memory"
-	"openreplay/backend/pkg/messages"
-	metrics "openreplay/backend/pkg/metrics/heuristics"
 	"time"
 
 	"openreplay/backend/internal/config/heuristics"
 	"openreplay/backend/internal/service"
 	"openreplay/backend/pkg/builders"
+	"openreplay/backend/pkg/logger"
+	"openreplay/backend/pkg/memory"
+	"openreplay/backend/pkg/messages"
+	metrics "openreplay/backend/pkg/metrics/heuristics"
 	"openreplay/backend/pkg/queue/types"
 )
 
 type heuristicsImpl struct {
+	log      logger.Logger
+	ctx      context.Context
 	cfg      *heuristics.Config
 	producer types.Producer
 	consumer types.Consumer
@@ -24,8 +27,10 @@ type heuristicsImpl struct {
 	finished chan struct{}
 }
 
-func New(cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager) service.Interface {
+func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager) service.Interface {
 	s := &heuristicsImpl{
+		log:      log,
+		ctx:      context.Background(),
 		cfg:      cfg,
 		producer: p,
 		consumer: c,
@@ -44,7 +49,7 @@ func (h *heuristicsImpl) run() {
 	select {
 	case evt := <-h.events.Events():
 		if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
-			log.Printf("can't send new event to queue: %s", err)
+			h.log.Error(h.ctx, "can't send new event to queue: %s", err)
 		} else {
 			metrics.IncreaseTotalEvents(messageTypeName(evt))
 		}
@@ -52,14 +57,14 @@ func (h *heuristicsImpl) run() {
 		h.producer.Flush(h.cfg.ProducerTimeout)
 		h.consumer.Commit()
 	case msg := <-h.consumer.Rebalanced():
-		log.Println(msg)
+		h.log.Info(h.ctx, "rebalanced: %v", msg)
 	case <-h.done:
 		// Stop event builder and flush all events
-		log.Println("stopping heuristics service")
+		h.log.Info(h.ctx, "stopping heuristics service")
 		h.events.Stop()
 		for evt := range h.events.Events() {
 			if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
-				log.Printf("can't send new event to queue: %s", err)
+				h.log.Error(h.ctx, "can't send new event to queue: %s", err)
 			}
 		}
 		h.producer.Close(h.cfg.ProducerTimeout)
@@ -71,7 +76,7 @@ func (h *heuristicsImpl) run() {
 		continue
 	}
 	if err := h.consumer.ConsumeNext(); err != nil {
-		log.Fatalf("Error on consuming: %v", err)
+		h.log.Fatal(h.ctx, "error on consumption: %v", err)
 	}
 }
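The shutdown branch drains the event builder before closing the producer, so in-flight heuristics events are not dropped. The `for ... range` over `h.events.Events()` only terminates if `events.Stop()` eventually closes the underlying channel; that assumption is made explicit in this tiny sketch:

```go
package main

import "fmt"

func main() {
	events := make(chan string, 8)
	events <- "click_rage"
	events <- "dead_click"
	close(events) // stands in for events.Stop() closing the channel

	// Ranging over a closed channel yields the buffered items, then exits,
	// which is what makes the drain loop above terminate.
	for evt := range events {
		fmt.Println("flushing event:", evt)
	}
}
```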
@@ -1,10 +1,11 @@
 package geoip
 
 import (
-	"github.com/oschwald/maxminddb-golang"
-	"log"
+	"errors"
 	"net"
 	"strings"
+
+	"github.com/oschwald/maxminddb-golang"
 )
 
 type geoIPRecord struct {
@@ -44,34 +45,33 @@ func UnpackGeoRecord(pkg string) *GeoRecord {
 }
 
 type GeoParser interface {
-	Parse(ip net.IP) *GeoRecord
+	Parse(ip net.IP) (*GeoRecord, error)
 }
 
 type geoParser struct {
 	r *maxminddb.Reader
 }
 
-func New(file string) GeoParser {
+func New(file string) (GeoParser, error) {
 	r, err := maxminddb.Open(file)
 	if err != nil {
-		log.Fatalln(err)
+		return nil, err
 	}
-	return &geoParser{r}
+	return &geoParser{r}, nil
 }
 
-func (geoIP *geoParser) Parse(ip net.IP) *GeoRecord {
+func (geoIP *geoParser) Parse(ip net.IP) (*GeoRecord, error) {
 	res := &GeoRecord{
 		Country: "UN",
 		State:   "",
 		City:    "",
 	}
 	if ip == nil {
-		return res
+		return res, errors.New("IP is nil")
 	}
 	var record geoIPRecord
 	if err := geoIP.r.Lookup(ip, &record); err != nil {
-		log.Println(err)
-		return res
+		return res, err
 	}
 	if record.Country.ISOCode != "" {
 		res.Country = record.Country.ISOCode
@@ -80,5 +80,5 @@ func (geoIP *geoParser) Parse(ip net.IP) *GeoRecord {
 		res.State = record.States[0].Names["en"]
 	}
 	res.City = record.City.Names["en"]
-	return res
+	return res, nil
}
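Callers of the geo parser now receive the error alongside a usable default record and decide how to react, where the old code called `log.Fatalln`/`log.Println` internally and could kill the whole process on a bad database file. A sketch of the new contract with stand-in types (this is not the real `pkg/geoip` API):

```go
package main

import (
	"errors"
	"fmt"
	"net"
)

// Parser mirrors the refactored GeoParser shape: the error is returned, not
// logged away inside the package.
type Parser interface {
	Parse(ip net.IP) (string, error)
}

type stubParser struct{}

func (stubParser) Parse(ip net.IP) (string, error) {
	if ip == nil {
		// A default record plus an error: the caller keeps something usable.
		return "UN", errors.New("IP is nil")
	}
	return "FR", nil
}

func main() {
	var p Parser = stubParser{}
	country, err := p.Parse(net.ParseIP("92.151.113.120"))
	if err != nil {
		fmt.Println("lookup failed, using default:", country)
		return
	}
	fmt.Println("country:", country)
}
```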
@@ -48,7 +48,7 @@ func TestGeoIP(t *testing.T) {
 	LoadGeoLiteDB()
 	defer DeleteGeoLiteDB()
 
-	geoIP := New("geo.mmdb")
+	geoIP, _ := New("geo.mmdb")
 
 	ip := net.ParseIP("92.151.113.120")
 	correctResult := &GeoRecord{
@@ -7,5 +7,5 @@ import (
 )
 
 func (e *Router) getConditions(w http.ResponseWriter, r *http.Request) {
-	ResponseWithError(w, http.StatusNotImplemented, errors.New("no support"), time.Now(), r.URL.Path, 0)
+	e.ResponseWithError(r.Context(), w, http.StatusNotImplemented, errors.New("no support"), time.Now(), r.URL.Path, 0)
 }
@@ -1,14 +1,14 @@
 package router
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io"
-	"log"
 	"math/rand"
 	"net/http"
 	"openreplay/backend/internal/http/ios"
 	"openreplay/backend/internal/http/util"
 	"openreplay/backend/internal/http/uuid"
 	"openreplay/backend/pkg/db/postgres"
 	"openreplay/backend/pkg/messages"
@@ -20,43 +20,50 @@
 
 func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
 	startTime := time.Now()
-	req := &StartIOSSessionRequest{}
 
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
 		return
 	}
 	body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
 	defer body.Close()
 
+	req := &StartIOSSessionRequest{}
 	if err := json.NewDecoder(body).Decode(req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, 0)
 		return
 	}
 
+	// Add tracker version to context
+	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
+
 	if req.ProjectKey == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, 0)
 		return
 	}
 
 	p, err := e.services.Projects.GetProjectByKey(*req.ProjectKey)
 	if err != nil {
 		if postgres.IsNoRowsErr(err) {
-			ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"), startTime, r.URL.Path, 0)
+			e.ResponseWithError(r.Context(), w, http.StatusNotFound, errors.New("project doesn't exist or is not active"), startTime, r.URL.Path, 0)
 		} else {
-			ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
+			e.log.Error(r.Context(), "failed to get project by key: %s, err: %s", *req.ProjectKey, err)
+			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, 0)
 		}
 		return
 	}
 
+	// Add projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))
+
 	// Check if the project supports mobile sessions
 	if !p.IsMobile() {
-		ResponseWithError(w, http.StatusForbidden, errors.New("project doesn't support mobile sessions"), startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("project doesn't support mobile sessions"), startTime, r.URL.Path, 0)
 		return
 	}
 
 	if !checkMobileTrackerVersion(req.TrackerVersion) {
-		ResponseWithError(w, http.StatusUpgradeRequired, errors.New("tracker version not supported"), startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusUpgradeRequired, errors.New("tracker version not supported"), startTime, r.URL.Path, 0)
 		return
 	}
 
@@ -69,32 +76,33 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
 		if req.Condition != "" {
 			rate, err := e.services.Conditions.GetRate(p.ProjectID, req.Condition, int(p.SampleRate))
 			if err != nil {
-				log.Printf("can't get condition rate: %s", err)
+				e.log.Warn(r.Context(), "can't get condition rate, condition: %s, err: %s", req.Condition, err)
 			} else {
 				p.SampleRate = byte(rate)
 			}
-		} else {
-			log.Printf("project sample rate: %d", p.SampleRate)
 		}
 		if dice >= p.SampleRate {
-			ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, 0)
+			e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("capture rate miss"), startTime, r.URL.Path, 0)
 			return
 		}
 
 		ua := e.services.UaParser.ParseFromHTTPRequest(r)
 		if ua == nil {
-			ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0)
+			e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0)
 			return
 		}
 		sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
 		if err != nil {
-			ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
+			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
 			return
 		}
 		// TODO: if EXPIRED => send message for two sessions association
 		expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
 		tokenData = &token.TokenData{sessionID, 0, expTime.UnixMilli()}
 
+		// Add sessionID to context
+		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID)))
+
 		geoInfo := e.ExtractGeoData(r)
 
 		if !req.DoNotRecord {
@@ -117,7 +125,7 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
 				UserDeviceMemorySize: req.DeviceMemory,
 				UserDeviceHeapSize:   req.DeviceMemory,
 			}); err != nil {
-				log.Printf("failed to add mobile session to DB: %v", err)
+				e.log.Warn(r.Context(), "failed to add mobile session to DB: %s", err)
 			}
 
 			sessStart := &messages.IOSSessionStart{
@@ -132,15 +140,14 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
 				UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
 				UserCountry:    geoInfo.Pack(),
 			}
-			log.Printf("mobile session start: %+v", sessStart)
 
 			if err := e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, sessStart.Encode()); err != nil {
-				log.Printf("failed to produce mobile session start message: %v", err)
+				e.log.Error(r.Context(), "failed to send mobile sessionStart event to queue: %s", err)
			}
		}
	}
 
-	ResponseWithJSON(w, &StartIOSSessionResponse{
+	e.ResponseWithJSON(r.Context(), w, &StartIOSSessionResponse{
 		Token:     e.services.Tokenizer.Compose(*tokenData),
 		UserUUID:  userUUID,
 		SessionID: strconv.FormatUint(tokenData.ID, 10),
@@ -155,9 +162,16 @@ func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request)
 	startTime := time.Now()
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil {
-		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
 		return
 	}
 
+	// Add sessionID and projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
+	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
+		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
+	}
+
 	e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
 }
 
@@ -165,7 +179,7 @@ func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Reque
 	startTime := time.Now()
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil && err != token.EXPIRED {
-		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
 		return
 	}
 	// Check timestamps here?
@@ -176,13 +190,19 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request)
 	startTime := time.Now()
 
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
-	if err != nil { // Should accept expired token?
-		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
+	if err != nil {
+		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
 		return
 	}
 
+	// Add sessionID and projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
+	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
+		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
+	}
+
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
 		return
 	}
 	r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
@@ -190,20 +210,20 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request)
 
 	err = r.ParseMultipartForm(5 * 1e6) // ~5Mb
 	if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
-		ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
 		return
 	} else if err != nil {
-		ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
+		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
 		return
 	}
 
 	if r.MultipartForm == nil {
-		ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"), startTime, r.URL.Path, 0)
+		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("multipart not parsed"), startTime, r.URL.Path, 0)
 		return
 	}
 
 	if len(r.MultipartForm.Value["projectKey"]) == 0 {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter?
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter?
 		return
 	}
 
@@ -216,16 +236,17 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request)
 
 		data, err := io.ReadAll(file)
 		if err != nil {
-			log.Fatalf("failed reading data: %s", err)
+			file.Close()
+			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
+			return
 		}
 
-		log.Printf("Uploading image... %v, len: %d", util.SafeString(fileHeader.Filename), len(data))
 		file.Close()
 
 		if err := e.services.Producer.Produce(e.cfg.TopicRawImages, sessionData.ID, data); err != nil {
-			log.Printf("failed to produce mobile session start message: %v", err)
+			e.log.Warn(r.Context(), "failed to send image to queue: %s", err)
 		}
	}
 
-	w.WriteHeader(http.StatusOK)
+	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, 0)
 }
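All of the handlers in this commit repeat the same enrichment step before doing any work: put `sessionID` and `projectID` on the request context so the logger and response helpers can read them later. A sketch of the idea in isolation; the helper name `enrich` is invented here, and the string keys mirror the diff (typed keys would be the stricter choice, see the earlier note):

```go
package main

import (
	"context"
	"fmt"
	"net/http"
)

// enrich stores the IDs on the request context; every log call or response
// helper downstream can then recover them from r.Context().
func enrich(r *http.Request, sessionID, projectID uint64) *http.Request {
	ctx := context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID))
	ctx = context.WithValue(ctx, "projectID", fmt.Sprintf("%d", projectID))
	return r.WithContext(ctx)
}

func handler(w http.ResponseWriter, r *http.Request) {
	r = enrich(r, 42, 7)
	fmt.Fprintf(w, "session=%v project=%v\n",
		r.Context().Value("sessionID"), r.Context().Value("projectID"))
}

func main() {
	http.HandleFunc("/i", handler)
	// http.ListenAndServe(":8080", nil) // left commented in this sketch
}
```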
@@ -1,29 +1,30 @@
 package router
 
 import (
+	"context"
 	"encoding/json"
 	"errors"
 	"fmt"
-	"github.com/gorilla/mux"
 	"io"
-	"log"
 	"math/rand"
 	"net/http"
-	"openreplay/backend/internal/http/util"
-	"openreplay/backend/pkg/featureflags"
-	"openreplay/backend/pkg/sessions"
-	"openreplay/backend/pkg/uxtesting"
 	"strconv"
 	"strings"
 	"time"
 
+	"github.com/gorilla/mux"
+
 	"github.com/Masterminds/semver"
 	"github.com/klauspost/compress/gzip"
+	"openreplay/backend/internal/http/util"
 	"openreplay/backend/internal/http/uuid"
 	"openreplay/backend/pkg/db/postgres"
+	"openreplay/backend/pkg/featureflags"
 	"openreplay/backend/pkg/flakeid"
 	. "openreplay/backend/pkg/messages"
+	"openreplay/backend/pkg/sessions"
 	"openreplay/backend/pkg/token"
+	"openreplay/backend/pkg/uxtesting"
 )
 
 func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
@@ -44,7 +45,7 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) (
 			return nil, fmt.Errorf("can't read gzip body: %s", err)
 		}
 		if err := reader.Close(); err != nil {
-			log.Printf("can't close gzip reader: %s", err)
+			e.log.Warn(r.Context(), "can't close gzip reader: %s", err)
 		}
 	} else {
 		bodyBytes, err = io.ReadAll(body)
@@ -52,7 +53,7 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) (
 
 	// Close body
 	if closeErr := body.Close(); closeErr != nil {
-		log.Printf("error while closing request body: %s", closeErr)
+		e.log.Warn(r.Context(), "error while closing request body: %s", closeErr)
 	}
 	if err != nil {
 		return nil, err
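A standalone sketch of what `readBody` does with compressed batches: enforce a size limit, then transparently gunzip when the tracker set `Content-Encoding: gzip`. This version uses the stdlib `compress/gzip` rather than `klauspost/compress`, and the request path is made up for the example.

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

// readBody reads an optionally gzip-compressed request body within a limit.
func readBody(w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
	body := http.MaxBytesReader(w, r.Body, limit)
	defer body.Close()

	if r.Header.Get("Content-Encoding") == "gzip" {
		reader, err := gzip.NewReader(body)
		if err != nil {
			return nil, fmt.Errorf("can't create gzip reader: %s", err)
		}
		defer reader.Close()
		return io.ReadAll(reader)
	}
	return io.ReadAll(body)
}

func main() {
	// Compress a sample payload the way a tracker would.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte(`{"projectKey":"demo"}`))
	zw.Close()

	req := httptest.NewRequest(http.MethodPost, "/v1/web/start", &buf)
	req.Header.Set("Content-Encoding", "gzip")
	data, err := readBody(httptest.NewRecorder(), req, 1<<20)
	fmt.Println(string(data), err)
}
```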
@@ -110,14 +111,13 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 
 	// Check request body
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
 	if err != nil {
-		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 	bodySize = len(bodyBytes)
@@ -125,37 +125,42 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 	// Parse request body
 	req := &StartSessionRequest{}
 	if err := json.Unmarshal(bodyBytes, req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
+	// Add tracker version to context
+	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
+
 	// Handler's logic
 	if req.ProjectKey == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	p, err := e.services.Projects.GetProjectByKey(*req.ProjectKey)
 	if err != nil {
 		if postgres.IsNoRowsErr(err) {
-			ResponseWithError(w, http.StatusNotFound,
-				errors.New("project doesn't exist or capture limit has been reached"), startTime, r.URL.Path, bodySize)
+			e.ResponseWithError(r.Context(), w, http.StatusNotFound, errors.New("project doesn't exist"), startTime, r.URL.Path, bodySize)
 		} else {
-			log.Printf("can't get project by key: %s", err)
-			ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"), startTime, r.URL.Path, bodySize)
+			e.log.Error(r.Context(), "failed to get project by key: %s, err: %s", *req.ProjectKey, err)
+			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, bodySize)
 		}
 		return
 	}
 
+	// Add projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))
+
 	// Check if the project supports web sessions
 	if !p.IsWeb() {
-		ResponseWithError(w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	ua := e.services.UaParser.ParseFromHTTPRequest(r)
 	if ua == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
 		return
 	}
 
@@ -164,30 +169,28 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 	userUUID := uuid.GetUUID(req.UserUUID)
 	tokenData, err := e.services.Tokenizer.Parse(req.Token)
 	if err != nil || req.Reset { // Starting the new one
-		dice := byte(rand.Intn(100)) // [0, 100)
+		dice := byte(rand.Intn(100))
 		// Use condition rate if it's set
 		if req.Condition != "" {
 			rate, err := e.services.Conditions.GetRate(p.ProjectID, req.Condition, int(p.SampleRate))
 			if err != nil {
-				log.Printf("can't get condition rate: %s", err)
+				e.log.Warn(r.Context(), "can't get condition rate, condition: %s, err: %s", req.Condition, err)
 			} else {
 				p.SampleRate = byte(rate)
 			}
-		} else {
-			log.Printf("project sample rate: %d", p.SampleRate)
 		}
 		if dice >= p.SampleRate {
-			ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, bodySize)
+			e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("capture rate miss"), startTime, r.URL.Path, bodySize)
 			return
 		}
 
 		startTimeMili := startTime.UnixMilli()
 		sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili))
 		if err != nil {
-			ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
+			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
 			return
 		}
 		// TODO: if EXPIRED => send message for two sessions association
 		expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
 		tokenData = &token.TokenData{
 			ID: sessionID,
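The sampling logic reduces to one comparison: `rand.Intn(100)` is uniform on [0, 100), so a sample rate of N admits dice values 0..N-1, i.e. roughly N% of sessions. A tiny simulation to make that concrete:

```go
package main

import (
	"fmt"
	"math/rand"
)

// shouldRecord reproduces the sampling dice used in the handlers above.
func shouldRecord(sampleRate byte) bool {
	dice := byte(rand.Intn(100)) // uniform in [0, 100)
	return dice < sampleRate     // the handler rejects when dice >= rate
}

func main() {
	recorded := 0
	for i := 0; i < 10000; i++ {
		if shouldRecord(30) { // 30% capture rate
			recorded++
		}
	}
	fmt.Printf("recorded %d of 10000 sessions (~30%%)\n", recorded)
}
```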
@@ -195,6 +198,9 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 			ExpTime: expTime.UnixMilli(),
 		}
 
+		// Add sessionID to context
+		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID)))
+
 		if !req.DoNotRecord {
 			sessionStart := &SessionStart{
 				Timestamp: getSessionTimestamp(req, startTimeMili),
@@ -239,12 +245,12 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 				UserDeviceHeapSize: sessionStart.UserDeviceHeapSize,
 				UserID:             &sessionStart.UserID,
 			}); err != nil {
-				log.Printf("can't insert session start: %s", err)
+				e.log.Warn(r.Context(), "can't insert sessionStart to DB: %s", err)
 			}
 
 			// Send sessionStart message to kafka
 			if err := e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, sessionStart.Encode()); err != nil {
-				log.Printf("can't send session start: %s", err)
+				e.log.Error(r.Context(), "can't send sessionStart to queue: %s", err)
 			}
 		}
 	}
@@ -252,7 +258,7 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
 	// Save information about session beacon size
 	e.addBeaconSize(tokenData.ID, p.BeaconSize)
 
-	ResponseWithJSON(w, &StartSessionResponse{
+	e.ResponseWithJSON(r.Context(), w, &StartSessionResponse{
 		Token:    e.services.Tokenizer.Compose(*tokenData),
 		UserUUID: userUUID,
 		UserOS:   ua.OS,
@@ -280,68 +286,86 @@ func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request)
 	// Check authorization
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil {
-		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
+	// Add sessionID and projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
+	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
+		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
+	}
+
 	// Check request body
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	bodyBytes, err := e.readBody(w, r, e.getBeaconSize(sessionData.ID))
 	if err != nil {
-		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 	bodySize = len(bodyBytes)
 
 	// Send processed messages to queue as array of bytes
 	// TODO: check bytes for nonsense crap
 	err = e.services.Producer.Produce(e.cfg.TopicRawWeb, sessionData.ID, bodyBytes)
 	if err != nil {
-		log.Printf("can't send processed messages to queue: %s", err)
+		e.log.Error(r.Context(), "can't send messages batch to queue: %s", err)
+		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't save message, try again"), startTime, r.URL.Path, bodySize)
+		return
 	}
 
-	ResponseOK(w, startTime, r.URL.Path, bodySize)
+	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
 }
 
 func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
 	startTime := time.Now()
 	bodySize := 0
 
 	// Check request body
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
 	if err != nil {
-		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 	bodySize = len(bodyBytes)
 
 	// Parse request body
 	req := &NotStartedRequest{}
 
 	if err := json.Unmarshal(bodyBytes, req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
+	// Add tracker version to context
+	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
+
 	// Handler's logic
 	if req.ProjectKey == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize)
 		return
 	}
-	ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway
+	p, err := e.services.Projects.GetProjectByKey(*req.ProjectKey)
+	if err != nil {
+		if postgres.IsNoRowsErr(err) {
+			e.ResponseWithError(r.Context(), w, http.StatusNotFound, errors.New("project doesn't exist"), startTime, r.URL.Path, bodySize)
+		} else {
+			e.log.Error(r.Context(), "can't find a project: %s", err)
+			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, bodySize)
+		}
+		return
+	}
+
+	// Add projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))
+
+	ua := e.services.UaParser.ParseFromHTTPRequest(r)
 	if ua == nil {
-		ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
 		return
 	}
 	geoInfo := e.ExtractGeoData(r)
@@ -362,10 +386,10 @@ func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
 		UserCity:    geoInfo.City,
 	})
 	if err != nil {
-		log.Printf("Unable to insert Unstarted Session: %v\n", err)
+		e.log.Warn(r.Context(), "can't insert un-started session: %s", err)
 	}
 
-	ResponseOK(w, startTime, r.URL.Path, bodySize)
+	// response ok anyway
+	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
 }
 
 func (e *Router) featureFlagsHandlerWeb(w http.ResponseWriter, r *http.Request) {
@@ -373,44 +397,46 @@ func (e *Router) featureFlagsHandlerWeb(w http.ResponseWriter, r *http.Request)
 	bodySize := 0
 
 	// Check authorization
-	_, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
+	info, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil {
-		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
-	// Check request body
+	// Add sessionID and projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", info.ID)))
+	if info, err := e.services.Sessions.Get(info.ID); err == nil {
+		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
+	}
+
 	if r.Body == nil {
-		ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
 	if err != nil {
-		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 	bodySize = len(bodyBytes)
 
 	// Parse request body
 	req := &featureflags.FeatureFlagsRequest{}
 
 	if err := json.Unmarshal(bodyBytes, req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	computedFlags, err := e.services.FeatureFlags.ComputeFlagsForSession(req)
 	if err != nil {
-		ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
 	resp := &featureflags.FeatureFlagsResponse{
 		Flags: computedFlags,
 	}
-	ResponseWithJSON(w, resp, startTime, r.URL.Path, bodySize)
+	e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
 }
 
 func (e *Router) getUXTestInfo(w http.ResponseWriter, r *http.Request) {
@@ -420,10 +446,22 @@ func (e *Router) getUXTestInfo(w http.ResponseWriter, r *http.Request) {
 	// Check authorization
 	sessInfo, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil {
-		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
+	// Add sessionID and projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessInfo.ID)))
+
+	sess, err := e.services.Sessions.Get(sessInfo.ID)
+	if err != nil {
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
+		return
+	}
+
+	// Add projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", sess.ProjectID)))
+
 	// Get taskID
 	vars := mux.Vars(r)
 	id := vars["id"]
@@ -431,22 +469,17 @@ func (e *Router) getUXTestInfo(w http.ResponseWriter, r *http.Request) {
 	// Get task info
 	info, err := e.services.UXTesting.GetInfo(id)
 	if err != nil {
-		ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
-		return
-	}
-	sess, err := e.services.Sessions.Get(sessInfo.ID)
-	if err != nil {
-		ResponseWithError(w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 	if sess.ProjectID != info.ProjectID {
-		ResponseWithError(w, http.StatusForbidden, errors.New("project mismatch"), startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("project mismatch"), startTime, r.URL.Path, bodySize)
 		return
 	}
 	type TaskInfoResponse struct {
 		Task *uxtesting.UXTestInfo `json:"test"`
 	}
-	ResponseWithJSON(w, &TaskInfoResponse{Task: info}, startTime, r.URL.Path, bodySize)
+	e.ResponseWithJSON(r.Context(), w, &TaskInfoResponse{Task: info}, startTime, r.URL.Path, bodySize)
 }
 
 func (e *Router) sendUXTestSignal(w http.ResponseWriter, r *http.Request) {
@@ -456,14 +489,19 @@ func (e *Router) sendUXTestSignal(w http.ResponseWriter, r *http.Request) {
 	// Check authorization
 	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
 	if err != nil {
-		ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 
+	// Add sessionID and projectID to context
+	r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
+	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
+		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
+	}
+
 	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
 	if err != nil {
-		log.Printf("error while reading request body: %s", err)
-		ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 	bodySize = len(bodyBytes)
@@ -472,17 +510,17 @@ func (e *Router) sendUXTestSignal(w http.ResponseWriter, r *http.Request) {
 	req := &uxtesting.TestSignal{}
 
 	if err := json.Unmarshal(bodyBytes, req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
 		return
 	}
 	req.SessionID = sessionData.ID
 
 	// Save test signal
 	if err := e.services.UXTesting.SetTestSignal(req); err != nil {
-		ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
+		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
 		return
 	}
-	ResponseOK(w, startTime, r.URL.Path, bodySize)
+	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
 }
 
 func (e *Router) sendUXTaskSignal(w http.ResponseWriter, r *http.Request) {
@ -492,14 +530,19 @@ func (e *Router) sendUXTaskSignal(w http.ResponseWriter, r *http.Request) {
|
|||
// Check authorization
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
|
||||
// Add sessionID and projectID to context
|
||||
r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
|
||||
if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
|
||||
r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
|
||||
}
|
||||
|
||||
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
|
||||
if err != nil {
|
||||
log.Printf("error while reading request body: %s", err)
|
||||
ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
bodySize = len(bodyBytes)
|
||||
|
|
@ -508,17 +551,17 @@ func (e *Router) sendUXTaskSignal(w http.ResponseWriter, r *http.Request) {
|
|||
req := &uxtesting.TaskSignal{}
|
||||
|
||||
if err := json.Unmarshal(bodyBytes, req); err != nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
req.SessionID = sessionData.ID
|
||||
|
||||
// Save test signal
|
||||
if err := e.services.UXTesting.SetTaskSignal(req); err != nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
ResponseOK(w, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
|
||||
}
|
||||
|
||||
func (e *Router) getUXUploadUrl(w http.ResponseWriter, r *http.Request) {
|
||||
|
|
@ -528,20 +571,26 @@ func (e *Router) getUXUploadUrl(w http.ResponseWriter, r *http.Request) {
|
|||
// Check authorization
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
|
||||
// Add sessionID and projectID to context
|
||||
r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
|
||||
if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
|
||||
r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
|
||||
}
|
||||
|
||||
key := fmt.Sprintf("%d/ux_webcam_record.webm", sessionData.ID)
|
||||
url, err := e.services.ObjStorage.GetPreSignedUploadUrl(key)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
type UrlResponse struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
ResponseWithJSON(w, &UrlResponse{URL: url}, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithJSON(r.Context(), w, &UrlResponse{URL: url}, startTime, r.URL.Path, bodySize)
|
||||
}
|
||||
|
||||
type ScreenshotMessage struct {
|
||||
|
|
@ -554,12 +603,18 @@ func (e *Router) imagesUploaderHandlerWeb(w http.ResponseWriter, r *http.Request
|
|||
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil { // Should accept expired token?
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
// Add sessionID and projectID to context
|
||||
r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
|
||||
if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
|
||||
r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
|
||||
}
|
||||
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
|
||||
|
|
@ -568,10 +623,10 @@ func (e *Router) imagesUploaderHandlerWeb(w http.ResponseWriter, r *http.Request
|
|||
// Parse the multipart form
|
||||
err = r.ParseMultipartForm(10 << 20) // Max upload size 10 MB
|
||||
if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
|
||||
ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
} else if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
|
||||
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -580,7 +635,7 @@ func (e *Router) imagesUploaderHandlerWeb(w http.ResponseWriter, r *http.Request
|
|||
for _, fileHeader := range fileHeaderList {
|
||||
file, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -588,13 +643,12 @@ func (e *Router) imagesUploaderHandlerWeb(w http.ResponseWriter, r *http.Request
|
|||
fileBytes, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
file.Close()
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
file.Close()
|
||||
|
||||
fileName := util.SafeString(fileHeader.Filename)
|
||||
log.Printf("fileName: %s, fileSize: %d", fileName, len(fileBytes))
|
||||
|
||||
// Create a message to send to Kafka
|
||||
msg := ScreenshotMessage{
|
||||
|
|
@ -603,17 +657,17 @@ func (e *Router) imagesUploaderHandlerWeb(w http.ResponseWriter, r *http.Request
|
|||
}
|
||||
data, err := json.Marshal(&msg)
|
||||
if err != nil {
|
||||
log.Printf("can't marshal screenshot message, err: %s", err)
|
||||
e.log.Warn(r.Context(), "can't marshal screenshot message, err: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Send the message to queue
|
||||
if err := e.services.Producer.Produce(e.cfg.TopicCanvasImages, sessionData.ID, data); err != nil {
|
||||
log.Printf("failed to produce canvas image message: %v", err)
|
||||
e.log.Warn(r.Context(), "can't send screenshot message to queue, err: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
ResponseOK(w, startTime, r.URL.Path, 0)
|
||||
e.ResponseOK(r.Context(), w, startTime, r.URL.Path, 0)
|
||||
}
|
||||
|
||||
func (e *Router) getTags(w http.ResponseWriter, r *http.Request) {
|
||||
|
|
@ -623,23 +677,27 @@ func (e *Router) getTags(w http.ResponseWriter, r *http.Request) {
|
|||
// Check authorization
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
sessInfo, err := e.services.Sessions.Get(sessionData.ID)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
|
||||
// Add sessionID and projectID to context
|
||||
r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
|
||||
r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", sessInfo.ProjectID)))
|
||||
|
||||
// Get tags
|
||||
tags, err := e.services.Tags.Get(sessInfo.ProjectID)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
type UrlResponse struct {
|
||||
Tags interface{} `json:"tags"`
|
||||
}
|
||||
ResponseWithJSON(w, &UrlResponse{Tags: tags}, startTime, r.URL.Path, bodySize)
|
||||
e.ResponseWithJSON(r.Context(), w, &UrlResponse{Tags: tags}, startTime, r.URL.Path, bodySize)
|
||||
}
|
||||
|
|
|
|||
|
|
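Every handler above repeats the same enrichment step after authorization: write sessionID (and, when the session lookup succeeds, projectID) into the request context as strings, so the JSON logger can attach them to every line emitted for that request. A sketch of a helper that could factor this out; enrichContext is hypothetical and not part of this change:

// Hypothetical helper (not in this diff): wraps the repeated
// context-enrichment block from the handlers above.
func (e *Router) enrichContext(r *http.Request, sessionID uint64) *http.Request {
    ctx := context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID))
    if info, err := e.services.Sessions.Get(sessionID); err == nil {
        ctx = context.WithValue(ctx, "projectID", fmt.Sprintf("%d", info.ProjectID))
    }
    return r.WithContext(ctx)
}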
@ -1,11 +1,11 @@
package router

import (
gzip "github.com/klauspost/pgzip"
"io"
"log"
"net/http"
"time"

gzip "github.com/klauspost/pgzip"
)

func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {

@ -18,27 +18,22 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID

switch r.Header.Get("Content-Encoding") {
case "gzip":
log.Println("Gzip", reader)

reader, err = gzip.NewReader(body)
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
return
}
log.Println("Gzip reader init", reader)
defer reader.Close()
default:
reader = body
}
log.Println("Reader after switch:", reader)
buf, err := io.ReadAll(reader)
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
return
}
log.Println("Produce message: ", buf, string(buf))
if err := e.services.Producer.Produce(topicName, sessionID, buf); err != nil {
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
return
}
w.WriteHeader(http.StatusOK)
@ -1,8 +1,8 @@
package router

import (
"context"
"encoding/json"
"log"
"net/http"
"time"

@ -17,18 +17,20 @@ func recordMetrics(requestStart time.Time, url string, code, bodySize int) {
metrics.RecordRequestDuration(float64(time.Now().Sub(requestStart).Milliseconds()), url, code)
}

func ResponseOK(w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
func (e *Router) ResponseOK(ctx context.Context, w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
w.WriteHeader(http.StatusOK)
e.log.Info(ctx, "response ok")
recordMetrics(requestStart, url, http.StatusOK, bodySize)
}

func ResponseWithJSON(w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
func (e *Router) ResponseWithJSON(ctx context.Context, w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
body, err := json.Marshal(res)
if err != nil {
log.Println(err)
e.log.Error(ctx, "can't marshal response: %s", err)
}
w.Header().Set("Content-Type", "application/json")
w.Write(body)
e.log.Info(ctx, "response ok")
recordMetrics(requestStart, url, http.StatusOK, bodySize)
}

@ -36,12 +38,13 @@ type response struct {
Error string `json:"error"`
}

func ResponseWithError(w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
func (e *Router) ResponseWithError(ctx context.Context, w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
body, err := json.Marshal(&response{err.Error()})
if err != nil {
log.Println(err)
e.log.Error(ctx, "can't marshal response: %s", err)
}
w.WriteHeader(code)
w.Write(body)
e.log.Error(ctx, "response error, code: %d, error: %s", code, err)
recordMetrics(requestStart, url, code, bodySize)
}
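These response helpers pin down the shape of the new pkg/logger API without showing its definition: a constructor logger.New() plus leveled, printf-style methods that take a context as their first argument. A minimal interface consistent with the call sites in this diff might look like the following; the real definition in pkg/logger may differ in detail:

// Sketch only, inferred from call sites; not the actual pkg/logger source.
type Logger interface {
    Info(ctx context.Context, format string, args ...interface{})
    Warn(ctx context.Context, format string, args ...interface{})
    Error(ctx context.Context, format string, args ...interface{})
    Fatal(ctx context.Context, format string, args ...interface{}) // logs, then terminates the process
}

The context argument is what lets the logger pick up the sessionID/projectID values the handlers store, so call sites no longer repeat those IDs in every format string.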
@ -2,11 +2,12 @@ package router

import (
"fmt"
"github.com/docker/distribution/context"
"github.com/tomasen/realip"
"log"
"net"
"net/http"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/pkg/logger"
"sync"
"time"

@ -22,25 +23,29 @@ type BeaconSize struct {
}

type Router struct {
router *mux.Router
log logger.Logger
cfg *http3.Config
services *http2.ServicesBuilder
router *mux.Router
mutex *sync.RWMutex
services *http2.ServicesBuilder
beaconSizeCache map[uint64]*BeaconSize // Cache for session's beaconSize
compressionThreshold int64
}

func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder) (*Router, error) {
func NewRouter(cfg *http3.Config, log logger.Logger, services *http2.ServicesBuilder) (*Router, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case services == nil:
return nil, fmt.Errorf("services is empty")
case log == nil:
return nil, fmt.Errorf("logger is empty")
}
e := &Router{
log: log,
cfg: cfg,
services: services,
mutex: &sync.RWMutex{},
services: services,
beaconSizeCache: make(map[uint64]*BeaconSize),
compressionThreshold: cfg.CompressionThreshold,
}

@ -91,7 +96,11 @@ func (e *Router) clearBeaconSizes() {

func (e *Router) ExtractGeoData(r *http.Request) *geoip.GeoRecord {
ip := net.ParseIP(realip.FromRequest(r))
return e.services.GeoIP.Parse(ip)
geoRec, err := e.services.GeoIP.Parse(ip)
if err != nil {
e.log.Warn(r.Context(), "failed to parse geo data: %v", err)
}
return geoRec
}

func (e *Router) init() {

@ -152,8 +161,7 @@ func (e *Router) corsMiddleware(next http.Handler) http.Handler {
w.WriteHeader(http.StatusOK)
return
}

log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path))
r = r.WithContext(context.WithValues(r.Context(), map[string]interface{}{"httpMethod": r.Method, "url": util.SafeString(r.URL.Path)}))

// Serve request
next.ServeHTTP(w, r)
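corsMiddleware now uses context.WithValues from github.com/docker/distribution/context to attach httpMethod and url in one call, complementing the per-handler sessionID/projectID values. Inside a logger implementation, folding those values into the JSON output could look roughly like this; the entry shape is assumed, not taken from pkg/logger:

// Sketch: lifting the context keys set in this diff into a JSON log entry.
func entryFromContext(ctx context.Context, level, msg string) map[string]interface{} {
    entry := map[string]interface{}{"level": level, "msg": msg}
    // Keys match those written by corsMiddleware and the HTTP handlers.
    for _, key := range []string{"httpMethod", "url", "sessionID", "projectID"} {
        if v := ctx.Value(key); v != nil {
            entry[key] = v
        }
    }
    return entry
}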
@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"golang.org/x/net/http2"
"log"
"net/http"
"time"
)

@ -29,9 +28,7 @@ func New(handler http.Handler, host, port string, timeout time.Duration) (*Serve
ReadTimeout: timeout,
WriteTimeout: timeout,
}
if err := http2.ConfigureServer(server, nil); err != nil {
log.Printf("can't configure http server: %s", err)
}
http2.ConfigureServer(server, nil)
return &Server{
server: server,
}, nil
@ -1,7 +1,6 @@
package services

import (
"log"
"openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/internal/http/uaparser"

@ -10,6 +9,7 @@ import (
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/featureflags"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/projects"

@ -35,25 +35,33 @@ type ServicesBuilder struct {
Conditions conditions.Conditions
}

func New(cfg *http.Config, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
projs := projects.New(pgconn, redis)
func New(log logger.Logger, cfg *http.Config, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
projs := projects.New(log, pgconn, redis)
// ObjectStorage client to generate pre-signed upload urls
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
log.Fatalf("can't init object storage: %s", err)
return nil, err
}
geoModule, err := geoip.New(cfg.MaxMinDBFile)
if err != nil {
return nil, err
}
uaModule, err := uaparser.NewUAParser(cfg.UAParserFile)
if err != nil {
return nil, err
}
return &ServicesBuilder{
Projects: projs,
Sessions: sessions.New(pgconn, projs, redis),
Sessions: sessions.New(log, pgconn, projs, redis),
FeatureFlags: featureflags.New(pgconn),
Producer: producer,
Tokenizer: token.NewTokenizer(cfg.TokenSecret),
UaParser: uaparser.NewUAParser(cfg.UAParserFile),
GeoIP: geoip.New(cfg.MaxMinDBFile),
UaParser: uaModule,
GeoIP: geoModule,
Flaker: flakeid.NewFlaker(cfg.WorkerID),
ObjStorage: objStore,
UXTesting: uxtesting.New(pgconn),
Tags: tags.New(pgconn),
Tags: tags.New(log, pgconn),
Conditions: conditions.New(pgconn),
}, nil
}
@ -1,7 +1,6 @@
package uaparser

import (
"log"
"strings"

"github.com/ua-parser/uap-go/uaparser"

@ -11,12 +10,12 @@ type UAParser struct {
p *uaparser.Parser
}

func NewUAParser(regexFile string) *UAParser {
func NewUAParser(regexFile string) (*UAParser, error) {
p, err := uaparser.New(regexFile)
if err != nil {
log.Fatalln(err)
return nil, err
}
return &UAParser{p}
return &UAParser{p}, nil
}

type UA struct {
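NewUAParser now returns an error instead of calling log.Fatalln inside the library, so the exit decision moves to the caller (the services builder above simply propagates it). The fail-fast then lives at the composition root; a sketch, assuming ctx and log are in scope as in the service mains:

// Assumed caller-side handling; the services builder instead returns the error.
uaModule, err := uaparser.NewUAParser(cfg.UAParserFile)
if err != nil {
    log.Fatal(ctx, "can't init ua-parser: %s", err)
}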
@ -3,9 +3,10 @@ package screenshot_handler
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"log"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
"os"

@ -18,11 +19,13 @@ import (
)

type saveTask struct {
ctx context.Context
sessionID uint64
images map[string]*bytes.Buffer
}

type uploadTask struct {
ctx context.Context
sessionID string
path string
name string

@ -30,18 +33,20 @@ type uploadTask struct {

type ImageStorage struct {
cfg *config.Config
log logger.Logger
objStorage objectstorage.ObjectStorage
saverPool pool.WorkerPool
uploaderPool pool.WorkerPool
}

func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
}
s := &ImageStorage{
cfg: cfg,
log: log,
objStorage: objStorage,
}
s.saverPool = pool.NewPool(4, 8, s.writeToDisk)

@ -54,7 +59,7 @@ func (v *ImageStorage) Wait() {
v.uploaderPool.Pause()
}

func (v *ImageStorage) Process(sessID uint64, data []byte) error {
func (v *ImageStorage) Process(ctx context.Context, sessID uint64, data []byte) error {
start := time.Now()
images := make(map[string]*bytes.Buffer)
uncompressedStream, err := gzip.NewReader(bytes.NewReader(data))

@ -79,12 +84,12 @@ func (v *ImageStorage) Process(sessID uint64, data []byte) error {
}
images[header.Name] = &buf
} else {
log.Printf("ExtractTarGz: uknown type: %d in %s", header.Typeflag, header.Name)
v.log.Error(ctx, "ExtractTarGz: unknown type: %d in %s", header.Typeflag, header.Name)
}
}

log.Printf("sessID: %d, arch size: %d, extracted archive in: %s", sessID, len(data), time.Since(start))
v.saverPool.Submit(&saveTask{sessionID: sessID, images: images})
v.log.Info(ctx, "arch size: %d, extracted archive in: %s", len(data), time.Since(start))
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, images: images})
return nil
}

@ -100,7 +105,7 @@ func (v *ImageStorage) writeToDisk(payload interface{}) {

// Ensure the directory exists
if err := os.MkdirAll(path, 0755); err != nil {
log.Fatalf("Error creating directories: %v", err)
v.log.Fatal(task.ctx, "Error creating directories: %v", err)
}

// Write images to disk

@ -108,19 +113,19 @@ func (v *ImageStorage) writeToDisk(payload interface{}) {
for name, img := range task.images {
outFile, err := os.Create(path + name) // or open file in rewrite mode
if err != nil {
log.Printf("can't create file: %s", err.Error())
v.log.Error(task.ctx, "can't create file: %s", err.Error())
}
if _, err := io.Copy(outFile, img); err != nil {
log.Printf("can't copy file: %s", err.Error())
v.log.Error(task.ctx, "can't copy file: %s", err.Error())
}
outFile.Close()
saved++
}
log.Printf("saved %d images to disk", saved)
v.log.Info(task.ctx, "saved %d images to disk", saved)
return
}

func (v *ImageStorage) PackScreenshots(sessID uint64, filesPath string) error {
func (v *ImageStorage) PackScreenshots(ctx context.Context, sessID uint64, filesPath string) error {
// Temporarily disabled for tests
if v.objStorage == nil {
return fmt.Errorf("object storage is empty")

@ -139,12 +144,11 @@ func (v *ImageStorage) PackScreenshots(sessID uint64, filesPath string) error {

err := cmd.Run()
if err != nil {
log.Printf("Failed to execute command: %v, stderr: %v", err, stderr.String())
return err
return fmt.Errorf("failed to execute command: %v, stderr: %v", err, stderr.String())
}
log.Printf("packed replay in %v", time.Since(start))
v.log.Info(ctx, "packed replay in %v", time.Since(start))

v.uploaderPool.Submit(&uploadTask{sessionID: sessionID, path: archPath, name: sessionID + "/replay.tar.zst"})
v.uploaderPool.Submit(&uploadTask{ctx: ctx, sessionID: sessionID, path: archPath, name: sessionID + "/replay.tar.zst"})
return nil
}

@ -153,11 +157,11 @@ func (v *ImageStorage) sendToS3(payload interface{}) {
start := time.Now()
video, err := os.ReadFile(task.path)
if err != nil {
log.Fatalf("Failed to read video file: %v", err)
v.log.Fatal(task.ctx, "failed to read replay file: %s", err)
}
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.Zstd); err != nil {
log.Fatalf("Storage: start uploading replay failed. %s", err)
v.log.Fatal(task.ctx, "failed to upload replay file: %s", err)
}
log.Printf("Replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
v.log.Info(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
return
}
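writeToDisk and sendToS3 run on worker pools, where there is no *http.Request to hang a context on; the diff therefore snapshots ctx inside each task struct so session-scoped log fields survive the handoff to the pool. The general pattern, with the surrounding types reduced to a hypothetical minimum:

// Pattern sketch: ctx travels with the work item into the pool.
type task struct {
    ctx  context.Context
    data []byte
}

func (s *service) process(ctx context.Context, data []byte) {
    s.pool.Submit(&task{ctx: ctx, data: data})
}

func (s *service) worker(payload interface{}) {
    t := payload.(*task)
    s.log.Info(t.ctx, "processed %d bytes", len(t.data)) // same session fields as the caller
}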
@ -1,7 +1,6 @@
package sessionender

import (
"log"
"time"

"openreplay/backend/pkg/messages"

@ -63,8 +62,6 @@ func (se *SessionEnder) ActivePartitions(parts []uint64) {
activeSessions++
}
}
log.Printf("SessionEnder: %d sessions left in active partitions: %+v, removed %d sessions",
activeSessions, parts, removedSessions)
}

// UpdateSession save timestamp for new sessions and update for existing sessions

@ -77,10 +74,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
)
if messages.IsIOSType(msg.TypeID()) {
msgTimestamp = messages.GetTimestamp(msg)
log.Printf("got timestamp from iOS message, session: %d, ts: %d", msg.SessionID(), msgTimestamp)
}
if batchTimestamp == 0 {
log.Printf("got empty timestamp for sessionID: %d", sessionID)
return
}
se.timeCtrl.UpdateTime(sessionID, batchTimestamp, localTimestamp)

@ -113,11 +108,10 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
// HandleEndedSessions runs handler for each ended session and delete information about session in successful case
func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
if !se.enabled {
log.Printf("SessionEnder is disabled")
return
}
currTime := time.Now().UnixMilli()
allSessions, removedSessions := len(se.sessions), 0
removedSessions := 0
brokerTime := make(map[int]int, 0)
serverTime := make(map[int]int, 0)

@ -154,11 +148,7 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
if endCase == 3 {
serverTime[1]++
}
} else {
log.Printf("sessID: %d, userTime: %d", sessID, sess.lastUserTime)
}
}
}
log.Printf("Removed %d of %d sessions; brokerTime: %d, serverTime: %d",
removedSessions, allSessions, brokerTime, serverTime)
}
@ -1,17 +1,18 @@
package assetscache

import (
"context"
"crypto/md5"
"io"
"log"
"net/url"
metrics "openreplay/backend/pkg/metrics/sink"
"strings"
"sync"
"time"

"openreplay/backend/internal/config/sink"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages"
metrics "openreplay/backend/pkg/metrics/sink"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets"
)

@ -22,6 +23,7 @@ type CachedAsset struct {
}

type AssetsCache struct {
log logger.Logger
mutex sync.RWMutex
cfg *sink.Config
rewriter *assets.Rewriter

@ -30,8 +32,9 @@ type AssetsCache struct {
blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
}

func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
assetsCache := &AssetsCache{
log: log,
cfg: cfg,
rewriter: rewriter,
producer: producer,

@ -76,7 +79,7 @@ func (e *AssetsCache) clearCache() {
metrics.DecreaseCachedAssets()
}
}
log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize)
e.log.Info(context.Background(), "cache cleaner: deleted %d/%d assets", deleted, cacheSize)
}

func (e *AssetsCache) shouldSkipAsset(baseURL string) bool {

@ -172,7 +175,8 @@ func (e *AssetsCache) sendAssetForCache(sessionID uint64, baseURL string, relati
sessionID,
assetMessage.Encode(),
); err != nil {
log.Printf("can't send asset to cache topic, sessID: %d, err: %s", sessionID, err)
ctx := context.WithValue(context.Background(), "sessionID", sessionID)
e.log.Error(ctx, "can't send asset to cache topic, sessID: %d, err: %s", sessionID, err)
}
}
}

@ -207,7 +211,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
// Cut first part of url (scheme + host)
justUrl, err := parseHost(baseURL)
if err != nil {
log.Printf("can't parse url: %s, err: %s", baseURL, err)
ctx := context.WithValue(context.Background(), "sessionID", sessionID)
e.log.Error(ctx, "can't parse url: %s, err: %s", baseURL, err)
if e.cfg.CacheAssets {
e.sendAssetsForCacheFromCSS(sessionID, baseURL, css)
}
@ -3,7 +3,6 @@ package sessionwriter
import (
"bufio"
"io"
"log"
"os"
)

@ -34,7 +33,6 @@ func (f *File) Write(data []byte) error {
if err == nil {
break
}
log.Printf("can't flush buffer: %s", err)
}
// Write big message directly to file
return f.write(f.file, data)
@ -1,13 +1,16 @@
package sessionwriter

import (
"context"
"fmt"
"log"
"sync"
"time"

"openreplay/backend/pkg/logger"
)

type SessionWriter struct {
log logger.Logger
filesLimit int
workingDir string
fileBuffer int

@ -18,8 +21,9 @@ type SessionWriter struct {
stopped chan struct{}
}

func NewWriter(filesLimit uint16, workingDir string, fileBuffer int, syncTimeout int) *SessionWriter {
func NewWriter(log logger.Logger, filesLimit uint16, workingDir string, fileBuffer int, syncTimeout int) *SessionWriter {
w := &SessionWriter{
log: log,
filesLimit: int(filesLimit) / 2, // should divide by 2 because each session has 2 files
workingDir: workingDir + "/",
fileBuffer: fileBuffer,

@ -48,7 +52,8 @@ func (w *SessionWriter) Write(sid uint64, domBuffer, devBuffer []byte) (err erro
// Check opened sessions limit and close extra session if you need to
if extraSessID := w.meta.GetExtra(); extraSessID != 0 {
if err := w.Close(extraSessID); err != nil {
log.Printf("can't close session: %s", err)
ctx := context.WithValue(context.Background(), "sessionID", extraSessID)
w.log.Error(ctx, "can't close session: %s", err)
}
}

@ -95,7 +100,8 @@ func (w *SessionWriter) Info() string {
func (w *SessionWriter) Sync() {
w.sessions.Range(func(sid, lockObj any) bool {
if err := w.sync(sid.(uint64)); err != nil {
log.Printf("can't sync file descriptor: %s", err)
ctx := context.WithValue(context.Background(), "sessionID", sid)
w.log.Error(ctx, "can't sync session: %s", err)
}
return true
})

@ -110,7 +116,8 @@ func (w *SessionWriter) synchronizer() {
case <-w.done:
w.sessions.Range(func(sid, lockObj any) bool {
if err := w.Close(sid.(uint64)); err != nil {
log.Printf("can't close file descriptor: %s", err)
ctx := context.WithValue(context.Background(), "sessionID", sid)
w.log.Error(ctx, "can't close session: %s", err)
}
return true
})
@ -1,7 +1,7 @@
package storage

import (
"log"
"fmt"
"sync"
"time"
)

@ -35,9 +35,9 @@ func (c *logCounter) Update(sessID uint64, ts time.Time) {
c.mu.Unlock()
}

func (c *logCounter) Print() {
func (c *logCounter) Log() string {
c.mu.Lock()
log.Printf("count: %d, dur: %ds, msgTS: %s, sessID: %d, part: %d",
res := fmt.Sprintf("count: %d, dur: %ds, msgTS: %s, sessID: %d, part: %d",
c.counter,
int(time.Now().Sub(c.timestamp).Seconds()),
c.lastTS.String(),

@ -46,4 +46,5 @@ func (c *logCounter) Print() {
)
c.mu.Unlock()
c.init()
return res
}
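Renaming Print to Log and returning the formatted string strips the logging dependency out of the counter: it now only reports its own state, and the owner decides where the line goes. The call site is not in this hunk, but presumably reads something like:

// Assumed usage elsewhere in the storage service.
s.log.Info(ctx, "%s", counter.Log())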
@ -2,22 +2,24 @@ package storage

import (
"bytes"
"context"
"fmt"
"github.com/andybalholm/brotli"
"io"
"log"
"openreplay/backend/pkg/objectstorage"
"os"
"strconv"
"strings"
"sync"
"time"

"github.com/andybalholm/brotli"
"github.com/klauspost/compress/zstd"
gzip "github.com/klauspost/pgzip"

config "openreplay/backend/internal/config/storage"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages"
metrics "openreplay/backend/pkg/metrics/storage"
"openreplay/backend/pkg/objectstorage"
)

type FileType string

@ -35,6 +37,7 @@ func (t FileType) String() string {
}

type Task struct {
ctx context.Context
id string
key string
domRaw []byte

@ -72,6 +75,7 @@ func NewBreakTask() *Task {

type Storage struct {
cfg *config.Config
log logger.Logger
objStorage objectstorage.ObjectStorage
startBytes []byte
compressionTasks chan *Task // brotli compression or gzip compression with encryption

@ -79,7 +83,7 @@ type Storage struct {
workersStopped chan struct{}
}

func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*Storage, error) {
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*Storage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")

@ -88,6 +92,7 @@ func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*Storage,
}
newStorage := &Storage{
cfg: cfg,
log: log,
objStorage: objStorage,
startBytes: make([]byte, cfg.FileSplitSize),
compressionTasks: make(chan *Task, 1),

@ -106,16 +111,17 @@ func (s *Storage) Wait() {
<-s.workersStopped
}

func (s *Storage) Process(msg *messages.SessionEnd) (err error) {
func (s *Storage) Process(ctx context.Context, msg *messages.SessionEnd) (err error) {
// Generate file path
sessionID := strconv.FormatUint(msg.SessionID(), 10)
filePath := s.cfg.FSDir + "/" + sessionID

// Prepare sessions
newTask := &Task{
ctx: ctx,
id: sessionID,
key: msg.EncryptionKey,
compression: s.setTaskCompression(),
compression: s.setTaskCompression(ctx),
}
wg := &sync.WaitGroup{}
wg.Add(2)

@ -134,7 +140,7 @@ func (s *Storage) Process(msg *messages.SessionEnd) (err error) {
wg.Wait()
if err != nil {
if strings.Contains(err.Error(), "big file") {
log.Printf("%s, sess: %d", err, msg.SessionID())
s.log.Warn(ctx, "can't process session: %s", err)
metrics.IncreaseStorageTotalSkippedSessions()
return nil
}

@ -146,7 +152,7 @@ func (s *Storage) Process(msg *messages.SessionEnd) (err error) {
return nil
}

func (s *Storage) openSession(sessID, filePath string, tp FileType) ([]byte, error) {
func (s *Storage) openSession(ctx context.Context, sessID, filePath string, tp FileType) ([]byte, error) {
if tp == DEV {
filePath += "devtools"
}

@ -165,7 +171,7 @@ func (s *Storage) openSession(sessID, filePath string, tp FileType) ([]byte, err
return raw, nil
}
start := time.Now()
res, err := s.sortSessionMessages(sessID, raw)
res, err := s.sortSessionMessages(ctx, sessID, raw)
if err != nil {
return nil, fmt.Errorf("can't sort session, err: %s", err)
}

@ -173,12 +179,16 @@ func (s *Storage) openSession(sessID, filePath string, tp FileType) ([]byte, err
return res, nil
}

func (s *Storage) sortSessionMessages(sessID string, raw []byte) ([]byte, error) {
func (s *Storage) sortSessionMessages(ctx context.Context, sessID string, raw []byte) ([]byte, error) {
// Parse messages, sort by index and save result into slice of bytes
unsortedMessages, err := messages.SplitMessages(sessID, raw)
unsortedMessages, err := messages.SplitMessages(raw)
if err != nil {
log.Printf("can't sort session, err: %s", err)
return raw, nil
if err.Error() == "session has duplicate messages" {
s.log.Warn(ctx, err.Error())
} else {
s.log.Error(ctx, "can't split session messages: %s", err)
return raw, nil
}
}
return messages.MergeMessages(raw, messages.SortMessages(unsortedMessages)), nil
}

@ -186,7 +196,7 @@ func (s *Storage) sortSessionMessages(sessID string, raw []byte) ([]byte, error)
func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
// Open session file
startRead := time.Now()
mob, err := s.openSession(task.id, path, tp)
mob, err := s.openSession(task.ctx, task.id, path, tp)
if err != nil {
return err
}

@ -198,7 +208,7 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
return nil
}

func (s *Storage) setTaskCompression() objectstorage.CompressionType {
func (s *Storage) setTaskCompression(ctx context.Context) objectstorage.CompressionType {
switch s.cfg.CompressionAlgo {
case "none":
return objectstorage.NoCompression

@ -209,7 +219,7 @@ func (s *Storage) setTaskCompression() objectstorage.CompressionType {
case "zstd":
return objectstorage.Zstd
default:
log.Printf("unknown compression algorithm: %s", s.cfg.CompressionAlgo)
s.log.Warn(ctx, "unknown compression algorithm: %s", s.cfg.CompressionAlgo)
return objectstorage.NoCompression
}
}

@ -222,12 +232,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
if tp == DEV || len(mob) <= s.cfg.FileSplitSize {
// Compression
start := time.Now()
data := s.compress(mob, task.compression)
data := s.compress(task.ctx, mob, task.compression)
metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())

// Encryption
start = time.Now()
result := s.encryptSession(data.Bytes(), task.key)
result := s.encryptSession(task.ctx, data.Bytes(), task.key)
metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())

if tp == DOM {

@ -249,12 +259,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
go func() {
// Compression
start := time.Now()
data := s.compress(mob[:s.cfg.FileSplitSize], task.compression)
data := s.compress(task.ctx, mob[:s.cfg.FileSplitSize], task.compression)
firstPart = time.Since(start).Milliseconds()

// Encryption
start = time.Now()
task.doms = bytes.NewBuffer(s.encryptSession(data.Bytes(), task.key))
task.doms = bytes.NewBuffer(s.encryptSession(task.ctx, data.Bytes(), task.key))
firstEncrypt = time.Since(start).Milliseconds()

// Record dom start raw size

@ -268,12 +278,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
go func() {
// Compression
start := time.Now()
data := s.compress(mob[s.cfg.FileSplitSize:], task.compression)
data := s.compress(task.ctx, mob[s.cfg.FileSplitSize:], task.compression)
secondPart = time.Since(start).Milliseconds()

// Encryption
start = time.Now()
task.dome = bytes.NewBuffer(s.encryptSession(data.Bytes(), task.key))
task.dome = bytes.NewBuffer(s.encryptSession(task.ctx, data.Bytes(), task.key))
secondEncrypt = time.Since(start).Milliseconds()

// Record dom end raw size

@ -289,72 +299,72 @@ func (s *Storage) packSession(task *Task, tp FileType) {
metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
}

func (s *Storage) encryptSession(data []byte, encryptionKey string) []byte {
func (s *Storage) encryptSession(ctx context.Context, data []byte, encryptionKey string) []byte {
if encryptionKey == "" {
// no encryption, just return the same data
return data
}
encryptedData, err := EncryptData(data, []byte(encryptionKey))
if err != nil {
log.Printf("can't encrypt data: %s", err)
s.log.Error(ctx, "can't encrypt data: %s", err)
encryptedData = data
}
return encryptedData
}

func (s *Storage) compress(data []byte, compressionType objectstorage.CompressionType) *bytes.Buffer {
func (s *Storage) compress(ctx context.Context, data []byte, compressionType objectstorage.CompressionType) *bytes.Buffer {
switch compressionType {
case objectstorage.Gzip:
return s.compressGzip(data)
return s.compressGzip(ctx, data)
case objectstorage.Brotli:
return s.compressBrotli(data)
return s.compressBrotli(ctx, data)
case objectstorage.Zstd:
return s.compressZstd(data)
return s.compressZstd(ctx, data)
default:
// no compression, just return the same data
return bytes.NewBuffer(data)
}
}

func (s *Storage) compressGzip(data []byte) *bytes.Buffer {
func (s *Storage) compressGzip(ctx context.Context, data []byte) *bytes.Buffer {
zippedMob := new(bytes.Buffer)
z, _ := gzip.NewWriterLevel(zippedMob, gzip.DefaultCompression)
if _, err := z.Write(data); err != nil {
log.Printf("can't write session data to compressor: %s", err)
s.log.Error(ctx, "can't write session data to compressor: %s", err)
}
if err := z.Close(); err != nil {
log.Printf("can't close compressor: %s", err)
s.log.Error(ctx, "can't close compressor: %s", err)
}
return zippedMob
}

func (s *Storage) compressBrotli(data []byte) *bytes.Buffer {
func (s *Storage) compressBrotli(ctx context.Context, data []byte) *bytes.Buffer {
out := bytes.Buffer{}
writer := brotli.NewWriterOptions(&out, brotli.WriterOptions{Quality: brotli.DefaultCompression})
in := bytes.NewReader(data)
n, err := io.Copy(writer, in)
if err != nil {
log.Printf("can't write session data to compressor: %s", err)
s.log.Error(ctx, "can't write session data to compressor: %s", err)
}

if int(n) != len(data) {
log.Printf("wrote less data than expected: %d vs %d", n, len(data))
s.log.Error(ctx, "wrote less data than expected: %d vs %d", n, len(data))
}

if err := writer.Close(); err != nil {
log.Printf("can't close compressor: %s", err)
s.log.Error(ctx, "can't close compressor: %s", err)
}
return &out
}

func (s *Storage) compressZstd(data []byte) *bytes.Buffer {
func (s *Storage) compressZstd(ctx context.Context, data []byte) *bytes.Buffer {
var out bytes.Buffer
w, _ := zstd.NewWriter(&out)
if _, err := w.Write(data); err != nil {
log.Printf("can't write session data to compressor: %s", err)
s.log.Error(ctx, "can't write session data to compressor: %s", err)
}
if err := w.Close(); err != nil {
log.Printf("can't close compressor: %s", err)
s.log.Error(ctx, "can't close compressor: %s", err)
}
return &out
}

@ -374,7 +384,7 @@ func (s *Storage) uploadSession(task *Task) {
// Upload session to s3
start := time.Now()
if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", task.compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
s.log.Fatal(task.ctx, "failed to upload mob file, err: %s", err)
}
uploadDoms = time.Now().Sub(start).Milliseconds()
}

@ -387,7 +397,7 @@ func (s *Storage) uploadSession(task *Task) {
// Upload session to s3
start := time.Now()
if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", task.compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
s.log.Fatal(task.ctx, "failed to upload mob file, err: %s", err)
}
uploadDome = time.Now().Sub(start).Milliseconds()
}

@ -400,7 +410,7 @@ func (s *Storage) uploadSession(task *Task) {
// Upload session to s3
start := time.Now()
if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", task.compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
s.log.Fatal(task.ctx, "failed to upload mob file, err: %s", err)
}
uploadDev = time.Now().Sub(start).Milliseconds()
}
@ -1,10 +1,10 @@
package builders

import (
"log"
"openreplay/backend/pkg/handlers"
"fmt"
"time"

"openreplay/backend/pkg/handlers"
. "openreplay/backend/pkg/messages"
)

@ -35,20 +35,19 @@ func (b *builder) checkSessionEnd(message Message) {
}
}

func (b *builder) handleMessage(m Message) {
func (b *builder) handleMessage(m Message) error {
if m.MsgID() < b.lastMessageID {
// May happen in case of duplicated messages in kafka (if `idempotence: false`)
log.Printf("skip message with wrong msgID, sessID: %d, msgID: %d, lastID: %d", b.sessionID, m.MsgID(), b.lastMessageID)
return
return fmt.Errorf("skip message with wrong msgID: %d, lastID: %d", m.MsgID(), b.lastMessageID)
}
if m.Time() <= 0 {
switch m.(type) {
case *IssueEvent, *PerformanceTrackAggr:
break
default:
log.Printf("skip message with incorrect timestamp, sessID: %d, msgID: %d, msgType: %d", b.sessionID, m.MsgID(), m.TypeID())
return fmt.Errorf("skip message with incorrect timestamp, msgID: %d, msgType: %d", m.MsgID(), m.TypeID())
}
return
return nil
}
if m.Time() > b.timestamp {
b.timestamp = m.Time()

@ -62,4 +61,5 @@ func (b *builder) handleMessage(m Message) {
}
}
b.checkSessionEnd(m)
return nil
}
@ -1,7 +1,8 @@
package builders

import (
"log"
"context"
"openreplay/backend/pkg/logger"
"sync"
"time"

@ -12,6 +13,7 @@ import (
const ForceDeleteTimeout = 30 * time.Minute

type builderMap struct {
log logger.Logger
handlersFabric func() []handlers.MessageProcessor
sessions map[uint64]*builder
mutex *sync.Mutex

@ -25,8 +27,9 @@ type EventBuilder interface {
Stop()
}

func NewBuilderMap(handlersFabric func() []handlers.MessageProcessor) EventBuilder {
func NewBuilderMap(log logger.Logger, handlersFabric func() []handlers.MessageProcessor) EventBuilder {
b := &builderMap{
log: log,
handlersFabric: handlersFabric,
sessions: make(map[uint64]*builder),
mutex: &sync.Mutex{},

@ -53,7 +56,10 @@ func (m *builderMap) Events() chan Message {
}

func (m *builderMap) HandleMessage(msg Message) {
m.getBuilder(msg.SessionID()).handleMessage(msg)
if err := m.getBuilder(msg.SessionID()).handleMessage(msg); err != nil {
ctx := context.WithValue(context.Background(), "sessionID", msg.SessionID())
m.log.Error(ctx, "can't handle message: %s", err)
}
}

func (m *builderMap) worker() {

@ -87,9 +93,6 @@ func (m *builderMap) checkSessions() {
}
}
m.mutex.Unlock()
if deleted > 0 {
log.Printf("deleted %d sessions from message builder", deleted)
}
}

func (m *builderMap) Stop() {
@ -1,13 +1,14 @@
package batch

import (
"log"
"context"
"strings"
"time"

"github.com/jackc/pgx/v4"

"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/database"
)

@ -49,16 +50,20 @@ func NewBatchesTask(size int) *batchesTask {
}

type BatchSet struct {
log logger.Logger
c pool.Pool
ctx context.Context
batches map[uint64]*SessionBatch
workerTask chan *batchesTask
done chan struct{}
finished chan struct{}
}

func NewBatchSet(c pool.Pool) *BatchSet {
func NewBatchSet(log logger.Logger, c pool.Pool) *BatchSet {
bs := &BatchSet{
log: log,
c: c,
ctx: context.Background(),
batches: make(map[uint64]*SessionBatch),
workerTask: make(chan *batchesTask, 1),
done: make(chan struct{}),

@ -108,10 +113,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
l := batch.Len()
for i := 0; i < l; i++ {
if _, err := br.Exec(); err != nil {
log.Printf("Error in PG batch: %v \n", err)
conn.log.Error(conn.ctx, "Error in PG batch: %v", err)
failedSql := batch.items[i]
query := strings.ReplaceAll(failedSql.query, "\n", " ")
log.Println("failed sql req:", query, failedSql.arguments)
conn.log.Error(conn.ctx, "failed sql req: %s", query)
}
}
br.Close() // returns err
@ -1,8 +1,10 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"log"
|
||||
"context"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres/pool"
|
||||
"openreplay/backend/pkg/logger"
|
||||
)
|
||||
|
||||
type bulksTask struct {
|
||||
|
|
@ -14,7 +16,9 @@ func NewBulksTask() *bulksTask {
|
|||
}
|
||||
|
||||
type BulkSet struct {
|
||||
log logger.Logger
|
||||
c pool.Pool
|
||||
ctx context.Context
|
||||
autocompletes Bulk
|
||||
requests Bulk
|
||||
customEvents Bulk
|
||||
|
|
@ -36,9 +40,11 @@ type BulkSet struct {
|
|||
finished chan struct{}
|
||||
}
|
||||
|
||||
func NewBulkSet(c pool.Pool) *BulkSet {
|
||||
func NewBulkSet(log logger.Logger, c pool.Pool) *BulkSet {
|
||||
bs := &BulkSet{
|
||||
log: log,
|
||||
c: c,
|
||||
ctx: context.Background(),
|
||||
workerTask: make(chan *bulksTask, 1),
|
||||
done: make(chan struct{}),
|
||||
finished: make(chan struct{}),
|
||||
|
|
@ -95,7 +101,7 @@ func (conn *BulkSet) initBulks() {
|
|||
"($%d, $%d, $%d)",
|
||||
3, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create autocomplete bulk: %s", err)
|
||||
conn.log.Fatal(conn.ctx, "can't create autocomplete bulk: %s", err)
|
||||
}
|
||||
conn.requests, err = NewBulk(conn.c,
|
||||
"events_common.requests",
|
||||
|
|
@ -103,7 +109,7 @@ func (conn *BulkSet) initBulks() {
|
|||
"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
|
||||
6, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create requests bulk: %s", err)
|
||||
conn.log.Fatal(conn.ctx, "can't create requests bulk: %s", err)
|
||||
}
|
||||
conn.customEvents, err = NewBulk(conn.c,
|
||||
"events_common.customs",
|
||||
|
|
@ -111,7 +117,7 @@ func (conn *BulkSet) initBulks() {
|
|||
"($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
|
||||
5, 200)
|
||||
if err != nil {
|
||||
log.Fatalf("can't create customEvents bulk: %s", err)
|
||||
conn.log.Fatal(conn.ctx, "can't create customEvents bulk: %s", err)
|
||||
}
|
||||
conn.webPageEvents, err = NewBulk(conn.c,
|
||||
"events.pages",
|
||||
|
|
@ -123,7 +129,7 @@ func (conn *BulkSet) initBulks() {
        " NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0))",
        18, 200)
    if err != nil {
        log.Fatalf("can't create webPageEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webPageEvents bulk: %s", err)
    }
    conn.webInputDurations, err = NewBulk(conn.c,
        "events.inputs",
@ -131,7 +137,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)",
        6, 200)
    if err != nil {
        log.Fatalf("can't create webPageEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webInputDurations bulk: %s", err)
    }
    conn.webGraphQL, err = NewBulk(conn.c,
        "events.graphql",
@ -139,7 +145,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
        6, 200)
    if err != nil {
        log.Fatalf("can't create webPageEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webGraphQL bulk: %s", err)
    }
    conn.webErrors, err = NewBulk(conn.c,
        "errors",
@ -147,7 +153,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
        6, 200)
    if err != nil {
        log.Fatalf("can't create webErrors bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webErrors bulk: %s", err)
    }
    conn.webErrorEvents, err = NewBulk(conn.c,
        "events.errors",
@ -155,7 +161,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, $%d)",
        4, 200)
    if err != nil {
        log.Fatalf("can't create webErrorEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webErrorEvents bulk: %s", err)
    }
    conn.webErrorTags, err = NewBulk(conn.c,
        "public.errors_tags",
@ -163,7 +169,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, $%d, $%d)",
        5, 200)
    if err != nil {
        log.Fatalf("can't create webErrorEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webErrorTags bulk: %s", err)
    }
    conn.webIssues, err = NewBulk(conn.c,
        "issues",
@ -171,7 +177,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, $%d)",
        4, 200)
    if err != nil {
        log.Fatalf("can't create webIssues bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webIssues bulk: %s", err)
    }
    conn.webIssueEvents, err = NewBulk(conn.c,
        "events_common.issues",
@ -179,7 +185,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
        5, 200)
    if err != nil {
        log.Fatalf("can't create webIssueEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webIssueEvents bulk: %s", err)
    }
    conn.webCustomEvents, err = NewBulk(conn.c,
        "events_common.customs",
@ -187,7 +193,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
        6, 200)
    if err != nil {
        log.Fatalf("can't create webCustomEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webCustomEvents bulk: %s", err)
    }
    conn.webClickEvents, err = NewBulk(conn.c,
        "events.clicks",
@ -195,7 +201,7 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
        8, 200)
    if err != nil {
        log.Fatalf("can't create webClickEvents bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
    }
    conn.webNetworkRequest, err = NewBulk(conn.c,
        "events_common.requests",
@ -203,20 +209,23 @@ func (conn *BulkSet) initBulks() {
        "($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)",
        14, 200)
    if err != nil {
        log.Fatalf("can't create webNetworkRequest bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err)
    }
    conn.webCanvasNodes, err = NewBulk(conn.c,
        "events.canvas_recordings",
        "(session_id, recording_id, timestamp)",
        "($%d, $%d, $%d)",
        3, 200)
    if err != nil {
        conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err)
    }
    conn.webTagTriggers, err = NewBulk(conn.c,
        "events.tags",
        "(session_id, timestamp, seq_index, tag_id)",
        "($%d, $%d, $%d, $%d)",
        4, 200)
    if err != nil {
        log.Fatalf("can't create webCanvasNodes bulk: %s", err)
        conn.log.Fatal(conn.ctx, "can't create webTagTriggers bulk: %s", err)
    }
}

@ -255,7 +264,7 @@ func (conn *BulkSet) Stop() {
func (conn *BulkSet) sendBulks(t *bulksTask) {
    for _, bulk := range t.bulks {
        if err := bulk.Send(); err != nil {
            log.Printf("%s bulk send err: %s", bulk.Table(), err)
            conn.log.Error(conn.ctx, "bulk send err: %s", err)
        }
    }
}
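Every bulk above follows the same create-or-die pattern, now routed through the injected structured logger instead of the global log package. A minimal sketch of that pattern with simplified stand-in types (the real BulkSet fields and NewBulk signature live in the OpenReplay source; this is illustrative only):

    // bulkSetSketch stands in for BulkSet; only the logging-relevant fields are shown.
    type bulkSetSketch struct {
        log logger.Logger   // injected pkg/logger instance
        ctx context.Context // service-level context carried by the struct
    }

    // mustCreate reproduces the create-or-die call sites above: one structured
    // JSON record with caller info, then process exit.
    func (b *bulkSetSketch) mustCreate(name string, err error) {
        if err != nil {
            b.log.Fatal(b.ctx, "can't create %s bulk: %s", name, err)
        }
    }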
@ -1,9 +1,11 @@
package postgres

import (
    "log"
    "context"

    "openreplay/backend/pkg/db/postgres/batch"
    "openreplay/backend/pkg/db/postgres/pool"
    "openreplay/backend/pkg/logger"
    "openreplay/backend/pkg/sessions"
)

@ -13,6 +15,7 @@ type CH interface {

// Conn contains batches, bulks and cache for all sessions
type Conn struct {
    log     logger.Logger
    Pool    pool.Pool
    batches *batch.BatchSet
    bulks   *BulkSet

@ -23,14 +26,15 @@ func (conn *Conn) SetClickHouse(ch CH) {
    conn.chConn = ch
}

func NewConn(pool pool.Pool) *Conn {
func NewConn(log logger.Logger, pool pool.Pool) *Conn {
    if pool == nil {
        log.Fatalf("pool is nil")
        log.Fatal(context.Background(), "pg pool is empty")
    }
    return &Conn{
        log:     log,
        Pool:    pool,
        bulks:   NewBulkSet(pool),
        batches: batch.NewBatchSet(pool),
        bulks:   NewBulkSet(log, pool),
        batches: batch.NewBatchSet(log, pool),
    }
}

@ -45,14 +49,14 @@ func (conn *Conn) InsertAutocompleteValue(sessionID uint64, projectID uint32, tp
        return
    }
    if err := conn.bulks.Get("autocompletes").Append(value, tp, projectID); err != nil {
        log.Printf("autocomplete bulk err: %s", err)
        conn.log.Error(context.Background(), "can't add autocomplete to PG, err: %s", err)
    }
    if conn.chConn == nil {
        return
    }
    // Send autocomplete data to clickhouse
    if err := conn.chConn.InsertAutocomplete(&sessions.Session{SessionID: sessionID, ProjectID: projectID}, tp, value); err != nil {
        log.Printf("click house autocomplete err: %s", err)
        conn.log.Error(context.Background(), "can't add autocomplete to CH, err: %s", err)
    }
}
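With the logger now passed as the first constructor argument, the wiring is explicit at the call site. A hedged sketch (the pool constructor and config field names here are assumptions, not taken from this diff):

    log := logger.New()
    pgPool, err := pool.New(cfg.Postgres) // hypothetical constructor and field name
    if err != nil {
        log.Fatal(context.Background(), "can't init PG pool: %s", err)
    }
    // NewConn itself logs "pg pool is empty" and exits if the pool is nil.
    conn := postgres.NewConn(log, pgPool)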
@ -1,8 +1,8 @@
package postgres

import (
    "context"
    "fmt"
    "log"
    "strings"

    "openreplay/backend/internal/http/geoip"

@ -68,6 +68,7 @@ func (conn *Conn) InsertCustomEvent(sessionID uint64, timestamp uint64, index ui
}

func (conn *Conn) InsertIssueEvent(sess *sessions.Session, e *messages.IssueEvent) error {
    ctx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
    issueID := hashid.IssueID(sess.ProjectID, e)
    payload := &e.Payload
    if *payload == "" || *payload == "{}" {

@ -75,19 +76,19 @@ func (conn *Conn) InsertIssueEvent(sess *sessions.Session, e *messages.IssueEven
    }

    if e.Type == "app_crash" {
        log.Printf("app crash event: %+v", e)
        conn.log.Warn(ctx, "app crash event: %+v", e)
        return nil
    }

    if err := conn.bulks.Get("webIssues").Append(sess.ProjectID, issueID, e.Type, e.ContextString); err != nil {
        log.Printf("insert web issue err: %s", err)
        conn.log.Error(ctx, "insert web issue err: %s", err)
    }
    if err := conn.bulks.Get("webIssueEvents").Append(sess.SessionID, issueID, e.Timestamp, truncSqIdx(e.MessageID), payload); err != nil {
        log.Printf("insert web issue event err: %s", err)
        conn.log.Error(ctx, "insert web issue event err: %s", err)
    }
    if e.Type == "custom" {
        if err := conn.bulks.Get("webCustomEvents").Append(sess.SessionID, truncSqIdx(e.MessageID), e.Timestamp, e.ContextString, e.Payload, "error"); err != nil {
            log.Printf("insert web custom event err: %s", err)
            conn.log.Error(ctx, "insert web custom event err: %s", err)
        }
    }
    return nil

@ -116,7 +117,8 @@ func (conn *Conn) InsertWebPageEvent(sess *sessions.Session, e *messages.PageEve
    if err = conn.bulks.Get("webPageEvents").Append(sess.SessionID, truncSqIdx(e.MessageID), e.Timestamp, e.Referrer, url.DiscardURLQuery(e.Referrer),
        host, path, query, e.DomContentLoadedEventEnd, e.LoadEventEnd, e.ResponseEnd, e.FirstPaint, e.FirstContentfulPaint,
        e.SpeedIndex, e.VisuallyComplete, e.TimeToInteractive, calcResponseTime(e), calcDomBuildingTime(e)); err != nil {
        log.Printf("insert web page event in bulk err: %s", err)
        sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
        conn.log.Error(sessCtx, "insert web page event in bulk err: %s", err)
    }
    // Add new value set to autocomplete bulk
    conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "LOCATION", url.DiscardURLQuery(path))

@ -131,7 +133,8 @@ func (conn *Conn) InsertWebClickEvent(sess *sessions.Session, e *messages.MouseC
    var host, path string
    host, path, _, _ = url.GetURLParts(e.Url)
    if err := conn.bulks.Get("webClickEvents").Append(sess.SessionID, truncSqIdx(e.MsgID()), e.Timestamp, e.Label, e.Selector, host+path, path, e.HesitationTime); err != nil {
        log.Printf("insert web click err: %s", err)
        sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
        conn.log.Error(sessCtx, "insert web click event in bulk err: %s", err)
    }
    // Add new value set to autocomplete bulk
    conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "CLICK", e.Label)

@ -149,23 +152,28 @@ func (conn *Conn) InsertInputChangeEvent(sess *sessions.Session, e *messages.Inp
        e.InputDuration = 0
    }
    if err := conn.bulks.Get("webInputDurations").Append(sess.SessionID, truncSqIdx(e.ID), e.Timestamp, e.Label, e.HesitationTime, e.InputDuration); err != nil {
        log.Printf("insert web input event err: %s", err)
        sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
        conn.log.Error(sessCtx, "insert web input duration in bulk err: %s", err)
    }
    conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "INPUT", e.Label)
    return nil
}

func (conn *Conn) InsertWebErrorEvent(sess *sessions.Session, e *types.ErrorEvent) error {
    errorID := e.ID(sess.ProjectID)
    sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
    errorID, err := e.ID(sess.ProjectID)
    if err != nil {
        conn.log.Warn(sessCtx, "id generation failed: %s", err)
    }
    if err := conn.bulks.Get("webErrors").Append(errorID, sess.ProjectID, e.Source, e.Name, e.Message, e.Payload); err != nil {
        log.Printf("insert web error err: %s", err)
        conn.log.Error(sessCtx, "insert web error err: %s", err)
    }
    if err := conn.bulks.Get("webErrorEvents").Append(sess.SessionID, truncSqIdx(e.MessageID), e.Timestamp, errorID); err != nil {
        log.Printf("insert web error event err: %s", err)
        conn.log.Error(sessCtx, "insert web error event err: %s", err)
    }
    for key, value := range e.Tags {
        if err := conn.bulks.Get("webErrorTags").Append(sess.SessionID, truncSqIdx(e.MessageID), errorID, key, value); err != nil {
            log.Printf("insert web error token err: %s", err)
            conn.log.Error(sessCtx, "insert web error token err: %s", err)
        }
    }
    return nil

@ -194,19 +202,21 @@ func (conn *Conn) InsertWebGraphQL(sess *sessions.Session, e *messages.GraphQL)
        response = &e.Response
    }
    if err := conn.bulks.Get("webGraphQL").Append(sess.SessionID, e.Meta().Timestamp, truncSqIdx(e.Meta().Index), e.OperationName, request, response); err != nil {
        log.Printf("insert web graphQL event err: %s", err)
        sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
        conn.log.Error(sessCtx, "insert web graphQL in bulk err: %s", err)
    }
    conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "GRAPHQL", e.OperationName)
    return nil
}

func (conn *Conn) InsertMouseThrashing(sess *sessions.Session, e *messages.MouseThrashing) error {
    sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
    issueID := hashid.MouseThrashingID(sess.ProjectID, sess.SessionID, e.Timestamp)
    if err := conn.bulks.Get("webIssues").Append(sess.ProjectID, issueID, "mouse_thrashing", e.Url); err != nil {
        log.Printf("insert web issue err: %s", err)
        conn.log.Error(sessCtx, "insert mouse thrashing issue err: %s", err)
    }
    if err := conn.bulks.Get("webIssueEvents").Append(sess.SessionID, issueID, e.Timestamp, truncSqIdx(e.MsgID()), nil); err != nil {
        log.Printf("insert web issue event err: %s", err)
        conn.log.Error(sessCtx, "insert mouse thrashing issue event err: %s", err)
    }
    return nil
}

@ -214,14 +224,16 @@ func (conn *Conn) InsertMouseThrashing(sess *sessions.Session, e *messages.Mouse
func (conn *Conn) InsertCanvasNode(sess *sessions.Session, m *messages.CanvasNode) error {
    canvasID := fmt.Sprintf("%d_%s", m.Timestamp, m.NodeId)
    if err := conn.bulks.Get("canvasNodes").Append(sess.SessionID, canvasID, m.Timestamp); err != nil {
        log.Printf("insert canvas node %s to db, err: %s", canvasID, err)
        sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
        conn.log.Error(sessCtx, "insert canvas node in bulk err: %s", err)
    }
    return nil
}

func (conn *Conn) InsertTagTrigger(sess *sessions.Session, m *messages.TagTrigger) error {
    if err := conn.bulks.Get("tagTriggers").Append(sess.SessionID, m.Timestamp, truncSqIdx(m.Index), m.TagId); err != nil {
        log.Printf("insert tag trigger %d to db, err: %s", m.TagId, err)
        sessCtx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
        conn.log.Error(sessCtx, "insert tag trigger %d to db, err: %s", m.TagId, err)
    }
    return nil
}
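Each insert method above rebuilds the same session-scoped context inline. A tiny helper like the following would name the pattern once; it is hypothetical and not part of the commit:

    // sessionCtx is illustrative only; the commit inlines this expression at each call site.
    func sessionCtx(sessionID uint64) context.Context {
        return context.WithValue(context.Background(), "sessionID", sessionID)
    }

    // usage inside an insert method:
    // conn.log.Error(sessionCtx(sess.SessionID), "insert web click event in bulk err: %s", err)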
@ -1,23 +1,14 @@
package postgres

import (
    "log"
    "context"

    "openreplay/backend/pkg/hashid"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/sessions"
    "openreplay/backend/pkg/url"
)

func (conn *Conn) InsertIOSSessionStart(sessionID uint64, e *messages.IOSSessionStart) error {
    log.Printf("handle ios session %d start: %v", sessionID, e)
    return nil
}

func (conn *Conn) InsertIOSSessionEnd(sessionID uint64, e *messages.IOSSessionEnd) error {
    log.Printf("handle ios session %d end: %v", sessionID, e)
    return nil
}

func (conn *Conn) InsertIOSEvent(session *sessions.Session, e *messages.IOSEvent) error {
    if err := conn.InsertCustomEvent(session.SessionID, e.Timestamp, truncSqIdx(e.Index), e.Name, e.Payload); err != nil {
        return err

@ -112,11 +103,12 @@ func (conn *Conn) InsertIOSIssueEvent(sess *sessions.Session, e *messages.IOSIss
        payload = nil
    }

    ctx := context.WithValue(context.Background(), "sessionID", sess.SessionID)
    if err := conn.bulks.Get("webIssues").Append(sess.ProjectID, issueID, e.Type, e.ContextString); err != nil {
        log.Printf("insert web issue err: %s", err)
        conn.log.Error(ctx, "can't add web issue to PG, err: %s", err)
    }
    if err := conn.bulks.Get("webIssueEvents").Append(sess.SessionID, issueID, e.Timestamp, truncSqIdx(e.Index), payload); err != nil {
        log.Printf("insert web issue event err: %s", err)
        conn.log.Error(ctx, "can't add web issue event to PG, err: %s", err)
    }
    return nil
}
@ -4,7 +4,6 @@ import (
    "context"
    "errors"
    "fmt"
    "log"
    "strings"
    "time"

@ -20,7 +19,6 @@ type Pool interface {
    Exec(sql string, arguments ...interface{}) error
    SendBatch(b *pgx.Batch) pgx.BatchResults
    Begin() (*_Tx, error)
    IsConnected() bool
    Close()
}

@ -29,12 +27,6 @@ type poolImpl struct {
    conn *pgxpool.Pool
}

func (p *poolImpl) IsConnected() bool {
    stat := p.conn.Stat()
    log.Println("stat: ", stat.AcquireCount(), stat.IdleConns(), stat.MaxConns(), stat.TotalConns())
    return true
}

func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
    start := time.Now()
    res, err := p.conn.Query(getTimeoutContext(), sql, args...)
@ -5,7 +5,6 @@ import (
    "encoding/json"
    "fmt"
    "hash/fnv"
    "log"
    "strconv"

    . "openreplay/backend/pkg/messages"

@ -58,11 +57,8 @@ func parseTags(tagsJSON string) (tags map[string]*string, err error) {
    return
}

func WrapJSException(m *JSException) *ErrorEvent {
func WrapJSException(m *JSException) (*ErrorEvent, error) {
    meta, err := parseTags(m.Metadata)
    if err != nil {
        log.Printf("Error on parsing Exception metadata: %v", err)
    }
    return &ErrorEvent{
        MessageID: m.Meta().Index,
        Timestamp: m.Meta().Timestamp,

@ -71,7 +67,7 @@ func WrapJSException(m *JSException) *ErrorEvent {
        Message: m.Message,
        Payload: m.Payload,
        Tags:    meta,
    }
    }, err
}

func WrapIntegrationEvent(m *IntegrationEvent) *ErrorEvent {

@ -102,7 +98,8 @@ func parseFirstFrame(payload string) (*stackFrame, error) {
    return frames[0], nil
}

func (e *ErrorEvent) ID(projectID uint32) string {
func (e *ErrorEvent) ID(projectID uint32) (string, error) {
    var idErr error
    hash := fnv.New128a()
    hash.Write([]byte(e.Source))
    hash.Write([]byte(e.Name))

@ -110,7 +107,7 @@ func (e *ErrorEvent) ID(projectID uint32) string {
    if e.Source == SOURCE_JS {
        frame, err := parseFirstFrame(e.Payload)
        if err != nil {
            log.Printf("Can't parse stackframe ((( %v ))): %v", e.Payload, err)
            idErr = fmt.Errorf("can't parse stackframe ((( %v ))): %v", e.Payload, err)
        }
        if frame != nil {
            hash.Write([]byte(frame.FileName))

@ -118,7 +115,7 @@ func (e *ErrorEvent) ID(projectID uint32) string {
            hash.Write([]byte(strconv.Itoa(frame.ColNo)))
        }
    }
    return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
    return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil)), idErr
}

func WrapCustomEvent(m *CustomEvent) *IssueEvent {
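The two-value ID keeps a soft-error contract: a usable hash still comes back when the stack frame cannot be parsed, so callers warn and continue instead of aborting. A caller-side sketch mirroring InsertWebErrorEvent earlier in this diff:

    errorID, err := e.ID(sess.ProjectID)
    if err != nil {
        // non-fatal: the ID was still derived from source, name and message
        conn.log.Warn(sessCtx, "id generation failed: %s", err)
    }
    // errorID is appended to the webErrors bulk as before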
@ -1,17 +1,16 @@
package profiling

import (
    "github.com/gorilla/mux"
    "log"
    "net/http"
    _ "net/http/pprof"

    "github.com/gorilla/mux"
)

func Profile() {
    go func() {
        router := mux.NewRouter()
        router.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux)
        log.Println("Starting profiler...")
        if err := http.ListenAndServe(":6060", router); err != nil {
            panic(err)
        }

backend/pkg/env/worker-id.go (vendored, 5 changes)
@ -1,9 +1,5 @@
package env

import (
    "log"
)

func hashHostname(hostname string) uint16 {
    var h uint16
    for i, b := range hostname {

@ -15,7 +11,6 @@ func hashHostname(hostname string) uint16 {
func WorkerID() uint16 {
    ip, err := fargateTaskIP()
    if err != nil {
        log.Printf("Warning: unable to retrieve Fargate Task IP: %v; trying to use HOSTNAME instead", err)
        return hashHostname(String("HOSTNAME"))
    }
    return uint16(ip[2])<<8 + uint16(ip[3])
@ -3,7 +3,6 @@ package featureflags
import (
    "encoding/json"
    "fmt"
    "log"
    "math/rand"
    "strconv"
    "strings"

@ -119,7 +118,6 @@ func numArrayToIntSlice(arr *pgtype.EnumArray) []int {
    for i := range arr.Elements {
        num, err := strconv.Atoi(arr.Elements[i].String)
        if err != nil {
            log.Printf("can't convert string to int: %v, full arr struct: %+v", err, *arr)
            slice = append(slice, 0)
        } else {
            slice = append(slice, num)
@ -2,7 +2,7 @@ package ios

import (
    "encoding/json"
    "log"

    "openreplay/backend/pkg/handlers"
    . "openreplay/backend/pkg/messages"
)

@ -22,7 +22,6 @@ type TapRageDetector struct {
func (h *TapRageDetector) createPayload() string {
    p, err := json.Marshal(struct{ Count int }{h.countsInARow})
    if err != nil {
        log.Printf("can't marshal TapRage payload to json: %s", err)
        return ""
    }
    return string(p)
@ -2,8 +2,6 @@ package web

import (
    "encoding/json"
    "log"

    . "openreplay/backend/pkg/messages"
)

@ -33,7 +31,6 @@ func (crd *ClickRageDetector) reset() {
func (crd *ClickRageDetector) createPayload() string {
    p, err := json.Marshal(struct{ Count int }{crd.countsInARow})
    if err != nil {
        log.Printf("can't marshal ClickRage payload to json: %s", err)
        return ""
    }
    return string(p)
@ -2,7 +2,6 @@ package web

import (
    "encoding/json"
    "log"

    . "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/messages/performance"

@ -32,7 +31,7 @@ func (f *CpuIssueDetector) createPayload() string {
        Rate uint64
    }{f.duration(), f.maxRate})
    if err != nil {
        log.Printf("can't marshal CpuIssue payload to json: %s", err)
        return ""
    }
    return string(p)
}
@ -2,7 +2,6 @@ package web

import (
    "encoding/json"
    "log"
    "math"

    . "openreplay/backend/pkg/messages"

@ -32,7 +31,7 @@ func (f *MemoryIssueDetector) Build() Message {
    }
    payload, err := json.Marshal(struct{ Rate int }{f.rate - 100})
    if err != nil {
        log.Printf("can't marshal MemoryIssue payload to json: %s", err)
        payload = []byte("{}")
    }
    event := &IssueEvent{
        Type: "memory",
@ -1,18 +1,20 @@
package integrations

import (
    "context"
    "fmt"
    "log"
    "strings"
    "time"

    config "openreplay/backend/internal/config/integrations"
    "openreplay/backend/pkg/intervals"
    "openreplay/backend/pkg/logger"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/pkg/token"
)

type Listener struct {
    log      logger.Logger
    cfg      *config.Config
    storage  Storage
    producer types.Producer

@ -21,8 +23,9 @@ type Listener struct {
    Errors chan error
}

func New(cfg *config.Config, storage Storage, producer types.Producer, manager *Manager, tokenizer *token.Tokenizer) (*Listener, error) {
func New(log logger.Logger, cfg *config.Config, storage Storage, producer types.Producer, manager *Manager, tokenizer *token.Tokenizer) (*Listener, error) {
    listener := &Listener{
        log:     log,
        cfg:     cfg,
        storage: storage,
        Errors:  make(chan error),

@ -37,7 +40,7 @@ func New(cfg *config.Config, storage Storage, producer types.Producer, manager *
    for _, i := range ints {
        // Add new integration to manager
        if err = manager.Update(i); err != nil {
            log.Printf("Integration parse error: %v | Integration: %v\n", err, *i)
            log.Error(context.Background(), "integration parse error: %v | integration: %v", err, *i)
        }
    }
    manager.RequestAll()

@ -53,12 +56,12 @@ func (l *Listener) worker() {
        case <-clientsCheckTick:
            l.manager.RequestAll()
        case event := <-l.manager.Events:
            log.Printf("New integration event: %+v\n", *event.IntegrationEvent)
            l.log.Info(context.Background(), "new integration event: %+v", *event.IntegrationEvent)
            sessionID := event.SessionID
            if sessionID == 0 {
                sessData, err := l.tokenizer.Parse(event.Token)
                if err != nil && err != token.EXPIRED {
                    log.Printf("Error on token parsing: %v; Token: %v", err, event.Token)
                    l.log.Error(context.Background(), "error on token parsing: %v; token: %v", err, event.Token)
                    continue
                }
                sessionID = sessData.ID

@ -66,10 +69,10 @@ func (l *Listener) worker() {
            // Why do we produce integration events to analytics topic
            l.producer.Produce(l.cfg.TopicAnalytics, sessionID, event.IntegrationEvent.Encode())
        case err := <-l.manager.Errors:
            log.Printf("Integration error: %v\n", err)
            l.log.Error(context.Background(), "integration error: %v", err)
        case i := <-l.manager.RequestDataUpdates:
            if err := l.storage.Update(&i); err != nil {
                log.Printf("Postgres Update request_data error: %v\n", err)
                l.log.Error(context.Background(), "Postgres update request_data error: %v", err)
            }
        default:
            newNotification, err := l.storage.CheckNew()

@ -80,10 +83,10 @@ func (l *Listener) worker() {
                l.Errors <- fmt.Errorf("Integration storage error: %v", err)
                continue
            }
            log.Printf("Integration update: %v\n", *newNotification)
            l.log.Info(context.Background(), "integration update: %v", *newNotification)
            err = l.manager.Update(newNotification)
            if err != nil {
                log.Printf("Integration parse error: %v | Integration: %v\n", err, *newNotification)
                l.log.Error(context.Background(), "integration parse error: %v | integration: %v", err, *newNotification)
            }
        }
    }
@ -1,20 +1,23 @@
package integrations

import (
    "log"
    "context"
    "openreplay/backend/pkg/integrations/clients"
    "openreplay/backend/pkg/integrations/model"
    "openreplay/backend/pkg/logger"
)

type Manager struct {
    log                logger.Logger
    clientMap          clients.ClientMap
    Events             chan *clients.SessionErrorEvent
    Errors             chan error
    RequestDataUpdates chan model.Integration // not pointer because it could change in other thread
}

func NewManager() *Manager {
func NewManager(log logger.Logger) *Manager {
    return &Manager{
        log:                log,
        clientMap:          make(clients.ClientMap),
        RequestDataUpdates: make(chan model.Integration, 100),
        Events:             make(chan *clients.SessionErrorEvent, 100),

@ -23,7 +26,7 @@ func NewManager() *Manager {
}

func (m *Manager) Update(i *model.Integration) (err error) {
    log.Printf("Integration initialization: %v\n", *i)
    m.log.Info(context.Background(), "Integration initialization: %v\n", *i)
    key := i.GetKey()
    if i.Options == nil {
        delete(m.clientMap, key)

@ -41,7 +44,7 @@ func (m *Manager) Update(i *model.Integration) (err error) {
}

func (m *Manager) RequestAll() {
    log.Printf("Requesting all...\n")
    m.log.Info(context.Background(), "Requesting all...")
    for _, c := range m.clientMap {
        go c.Request()
    }
@ -4,7 +4,8 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "log"
    "openreplay/backend/pkg/logger"

    "openreplay/backend/pkg/integrations/model"
    "time"

@ -21,11 +22,13 @@ type Storage interface {

type storageImpl struct {
    conn *pgx.Conn
    log  logger.Logger
}

func NewStorage(conn *pgx.Conn) Storage {
func NewStorage(conn *pgx.Conn, log logger.Logger) Storage {
    return &storageImpl{
        conn: conn,
        log:  log,
    }
}

@ -69,7 +72,7 @@ func (s *storageImpl) GetAll() ([]*model.Integration, error) {
    for rows.Next() {
        i := new(model.Integration)
        if err := rows.Scan(&i.ProjectID, &i.Provider, &i.Options, &i.RequestData); err != nil {
            log.Printf("Postgres scan error: %v\n", err)
            s.log.Error(context.Background(), "postgres scan error: %v", err)
            continue
        }
        integrations = append(integrations, i)
backend/pkg/logger/logger.go (new file, 68 lines)

@ -0,0 +1,68 @@
package logger

import (
    "context"
    "fmt"
    "go.uber.org/zap"
    "go.uber.org/zap/zapcore"
    "os"
)

type Logger interface {
    Info(ctx context.Context, message string, args ...interface{})
    Warn(ctx context.Context, message string, args ...interface{})
    Error(ctx context.Context, message string, args ...interface{})
    Fatal(ctx context.Context, message string, args ...interface{})
}

type loggerImpl struct {
    l *zap.Logger
}

func New() Logger {
    encoderConfig := zap.NewProductionEncoderConfig()
    encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")
    jsonEncoder := zapcore.NewJSONEncoder(encoderConfig)
    core := zapcore.NewCore(jsonEncoder, zapcore.AddSync(os.Stdout), zap.InfoLevel)
    baseLogger := zap.New(core, zap.AddCaller())
    logger := baseLogger.WithOptions(zap.AddCallerSkip(1))
    return &loggerImpl{l: logger}
}

func (l *loggerImpl) prepare(ctx context.Context, logger *zap.Logger) *zap.Logger {
    if sID, ok := ctx.Value("sessionID").(string); ok {
        logger = logger.With(zap.String("sessionID", sID))
    }
    if pID, ok := ctx.Value("projectID").(string); ok {
        logger = logger.With(zap.String("projectID", pID))
    }
    if tVer, ok := ctx.Value("tracker").(string); ok {
        logger = logger.With(zap.String("tracker", tVer))
    }
    if httpMethod, ok := ctx.Value("httpMethod").(string); ok {
        logger = logger.With(zap.String("httpMethod", httpMethod))
    }
    if urlPath, ok := ctx.Value("url").(string); ok {
        logger = logger.With(zap.String("url", urlPath))
    }
    if batch, ok := ctx.Value("batch").(string); ok {
        logger = logger.With(zap.String("batch", batch))
    }
    return logger
}

func (l *loggerImpl) Info(ctx context.Context, message string, args ...interface{}) {
    l.prepare(ctx, l.l.With(zap.String("level", "info"))).Info(fmt.Sprintf(message, args...))
}

func (l *loggerImpl) Warn(ctx context.Context, message string, args ...interface{}) {
    l.prepare(ctx, l.l.With(zap.String("level", "warn"))).Warn(fmt.Sprintf(message, args...))
}

func (l *loggerImpl) Error(ctx context.Context, message string, args ...interface{}) {
    l.prepare(ctx, l.l.With(zap.String("level", "error"))).Error(fmt.Sprintf(message, args...))
}

func (l *loggerImpl) Fatal(ctx context.Context, message string, args ...interface{}) {
    l.prepare(ctx, l.l.With(zap.String("level", "fatal"))).Fatal(fmt.Sprintf(message, args...))
}
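A small end-to-end usage sketch of the new package; the JSON in the trailing comment is indicative of the zap output, not byte-exact. Note that prepare only promotes string-typed context values, so IDs must be stored as strings:

    package main

    import (
        "context"

        "openreplay/backend/pkg/logger"
    )

    func main() {
        log := logger.New()
        // string-typed values become JSON fields via prepare()
        ctx := context.WithValue(context.Background(), "sessionID", "7436717533708088")
        log.Info(ctx, "processed %d messages", 42)
        // -> {"level":"info","ts":"2024-01-02 15:04:05.000","caller":"...",
        //     "msg":"processed 42 messages","sessionID":"7436717533708088"}
    }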
@ -3,7 +3,6 @@ package memory
import (
    "errors"
    "fmt"
    "log"
    "os"
    "strconv"
    "strings"

@ -24,7 +23,6 @@ func parseMemoryLimit() (int, error) {
        if strings.Contains(line, Limit) {
            lineParts := strings.Split(line, " ")
            if len(lineParts) != 2 {
                log.Println("can't parse memory limit")
                return 0, fmt.Errorf("can't split string with memory limit, str: %s", line)
            }
            value, err := strconv.Atoi(lineParts[1])

@ -36,7 +34,6 @@ func parseMemoryLimit() (int, error) {
            }
            // DEBUG_LOG
            value /= 1024 * 1024
            log.Printf("memory limit is defined: %d MiB", value)
            return value, nil
        }
    }
@ -1,11 +1,13 @@
package memory

import (
    "context"
    "errors"
    "log"
    "runtime"
    "sync"
    "time"

    "openreplay/backend/pkg/logger"
)

type Manager interface {

@ -13,6 +15,7 @@ type Manager interface {
}

type managerImpl struct {
    log     logger.Logger
    mutex   *sync.RWMutex
    current uint64
    maximum uint64

@ -21,12 +24,15 @@ type managerImpl struct {
    total uint64
}

func NewManager(maximumMemory, thresholdValue uint64) (Manager, error) {
func NewManager(log logger.Logger, maximumMemory, thresholdValue uint64) (Manager, error) {
    ctx := context.Background()
    if maximumMemory < 1 {
        log.Println("maximumMemory is not defined, try to parse memory limit from system")
        log.Info(ctx, "maximumMemory is not defined, try to parse memory limit from system")
        memLimit, err := parseMemoryLimit()
        if err != nil {
            log.Println("can't parse system memory limit, err: ", err)
            log.Error(ctx, "can't parse system memory limit, err: ", err)
        } else {
            log.Info(ctx, "memory limit is defined: %d MiB", memLimit)
        }
        if memLimit > 0 {
            maximumMemory = uint64(memLimit)

@ -36,6 +42,7 @@ func NewManager(maximumMemory, thresholdValue uint64) (Manager, error) {
        return nil, errors.New("threshold must be less than 100")
    }
    m := &managerImpl{
        log:       log,
        mutex:     &sync.RWMutex{},
        threshold: thresholdValue,
        maximum:   maximumMemory,

@ -57,7 +64,7 @@ func (m *managerImpl) calcMemoryUsage() {
    allocated := rtm.Alloc / 1024 / 1024
    total := rtm.Sys / 1024 / 1024
    if allocated > m.maximum && m.HasFreeMemory() {
        log.Println("memory consumption is greater than maximum memory, current: ", allocated, "maximum: ", m.maximum)
        m.log.Warn(context.Background(), "memory consumption is greater than maximum memory, current: %d, maximum: %d", allocated, m.maximum)
    }
    current := uint64(float64(allocated*100) / float64(m.maximum))
    m.mutex.Lock()

@ -68,7 +75,7 @@ func (m *managerImpl) calcMemoryUsage() {
}

func (m *managerImpl) printStat() {
    log.Printf("current memory consumption: %d, allocated: %d, maximum: %d, current usage: %d, threshold: %d",
    m.log.Info(context.Background(), "current memory consumption: %d, allocated: %d, maximum: %d, current usage: %d, threshold: %d",
        m.total, m.allocated, m.maximum, m.current, m.threshold)
}
@ -1,16 +1,18 @@
package messages

import "openreplay/backend/pkg/logger"

type enderIteratorImpl struct {
    coreIterator MessageIterator
    handler      MessageHandler
    lastMessage  Message
}

func NewEnderMessageIterator(messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
func NewEnderMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
    enderIter := &enderIteratorImpl{
        handler: messageHandler,
    }
    enderIter.coreIterator = NewMessageIterator(enderIter.handle, messageFilter, autoDecode)
    enderIter.coreIterator = NewMessageIterator(log, enderIter.handle, messageFilter, autoDecode)
    return enderIter
}
@ -1,6 +1,7 @@
package messages

import (
    "openreplay/backend/pkg/logger"
    "openreplay/backend/pkg/metrics/sink"
)

@ -9,11 +10,11 @@ type sinkIteratorImpl struct {
    handler MessageHandler
}

func NewSinkMessageIterator(messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
    iter := &sinkIteratorImpl{
        handler: messageHandler,
    }
    iter.coreIterator = NewMessageIterator(iter.handle, messageFilter, autoDecode)
    iter.coreIterator = NewMessageIterator(log, iter.handle, messageFilter, autoDecode)
    return iter
}
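Both wrappers follow the same decorator shape: implement MessageIterator by delegating to the core iterator while interposing a per-message hook. A generic sketch of that shape, assuming Iterate is the interface's sole method (the names here are illustrative, not part of the commit):

    type countingIteratorImpl struct {
        coreIterator MessageIterator
        handler      MessageHandler
        seen         uint64
    }

    func NewCountingMessageIterator(log logger.Logger, h MessageHandler, filter []int, autoDecode bool) MessageIterator {
        iter := &countingIteratorImpl{handler: h}
        // the hook is registered as the core iterator's handler
        iter.coreIterator = NewMessageIterator(log, iter.handle, filter, autoDecode)
        return iter
    }

    func (i *countingIteratorImpl) handle(msg Message) {
        i.seen++ // interposed per-message work
        i.handler(msg)
    }

    func (i *countingIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
        i.coreIterator.Iterate(batchData, batchInfo)
    }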
@ -1,8 +1,9 @@
package messages

import (
    "context"
    "fmt"
    "log"
    "openreplay/backend/pkg/logger"
)

// MessageHandler processes one message using service logic

@ -14,6 +15,7 @@ type MessageIterator interface {
}

type messageIteratorImpl struct {
    log       logger.Logger
    filter    map[int]struct{}
    preFilter map[int]struct{}
    handler   MessageHandler

@ -27,8 +29,9 @@ type messageIteratorImpl struct {
    urls *pageLocations
}

func NewMessageIterator(messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
func NewMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
    iter := &messageIteratorImpl{
        log:        log,
        handler:    messageHandler,
        autoDecode: autoDecode,
        urls:       NewPageLocations(),

@ -58,12 +61,14 @@ func (i *messageIteratorImpl) prepareVars(batchInfo *BatchInfo) {
}

func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
    ctx := context.WithValue(context.Background(), "sessionID", batchInfo.sessionID)

    // Create new message reader
    reader := NewMessageReader(batchData)

    // Pre-decode batch data
    if err := reader.Parse(); err != nil {
        log.Printf("pre-decode batch err: %s, info: %s", err, batchInfo.Info())
        i.log.Error(ctx, "pre-decode batch err: %s, info: %s", err, batchInfo.Info())
        return
    }

@ -81,12 +86,12 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
        if _, ok := i.preFilter[msg.TypeID()]; ok {
            msg = msg.Decode()
            if msg == nil {
                log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
                i.log.Error(ctx, "decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
                return
            }
            msg = transformDeprecated(msg)
            if err := i.preprocessing(msg); err != nil {
                log.Printf("message preprocessing err: %s", err)
                i.log.Error(ctx, "message preprocessing err: %s", err)
                return
            }
        }

@ -101,7 +106,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
        if i.autoDecode {
            msg = msg.Decode()
            if msg == nil {
                log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
                i.log.Error(ctx, "decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
                return
            }
        }

@ -125,7 +130,8 @@ func (i *messageIteratorImpl) getIOSTimestamp(msg Message) uint64 {
}

func (i *messageIteratorImpl) zeroTsLog(msgType string) {
    log.Printf("zero timestamp in %s, info: %s", msgType, i.batchInfo.Info())
    ctx := context.WithValue(context.Background(), "sessionID", i.batchInfo.sessionID)
    i.log.Warn(ctx, "zero timestamp in %s, info: %s", msgType, i.batchInfo.Info())
}

func (i *messageIteratorImpl) preprocessing(msg Message) error {

@ -170,7 +176,8 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
        i.messageInfo.Timestamp = m.Timestamp
        if m.Timestamp == 0 {
            i.zeroTsLog("SessionStart")
            log.Printf("zero session start, project: %d, UA: %s, tracker: %s, info: %s",
            ctx := context.WithValue(context.Background(), "sessionID", i.batchInfo.sessionID)
            i.log.Warn(ctx, "zero timestamp in SessionStart, project: %d, UA: %s, tracker: %s, info: %s",
                m.ProjectID, m.UserAgent, m.TrackerVersion, i.batchInfo.Info())
        }
@ -4,7 +4,6 @@ import (
    "bytes"
    "fmt"
    "io"
    "log"
    "sort"
)

@ -21,27 +20,28 @@ func (m *msgInfo) Print() string {
    return fmt.Sprintf("index: %d, start: %d, end: %d, type: %d, body: %s", m.index, m.start, m.end, m.msgType, m.body)
}

func SplitMessages(sessID string, data []byte) ([]*msgInfo, error) {
func SplitMessages(data []byte) ([]*msgInfo, error) {
    messages := make([]*msgInfo, 0)
    indexes := make(map[uint64]bool)
    hadDuplicates := false
    var lastTimestamp uint64
    reader := NewBytesReader(data)
    var err error = nil
    for {
        // Get message start
        msgStart := reader.Pointer()
        if int(msgStart) >= len(data) {
            return messages, nil
            return messages, err
        }

        // Read message index
        msgIndex, err := reader.ReadIndex()
        if err != nil {
            if err != io.EOF {
                log.Println(reader.Pointer(), msgStart)
                return messages, fmt.Errorf("read message index err: %s", err)
                return messages, fmt.Errorf("can't read message's index, msgStart: %d, pointer: %d, err: %s",
                    msgStart, reader.Pointer(), err)
            }
            return messages, nil
            return messages, err
        }

        // Read message type

@ -58,7 +58,6 @@ func SplitMessages(sessID string, data []byte) ([]*msgInfo, error) {
        if _, ok := indexes[msgIndex]; ok && !hadDuplicates {
            hadDuplicates = true
            log.Printf("Session %s has duplicate messages", sessID)
            continue
        }
        indexes[msgIndex] = true

@ -67,6 +66,7 @@ func SplitMessages(sessID string, data []byte) ([]*msgInfo, error) {
        if msgType == MsgTimestamp {
            msgBody := body.(*Timestamp)
            lastTimestamp = msgBody.Timestamp
            err = fmt.Errorf("session has duplicate messages")
        }

        // Add new message info to messages slice
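SplitMessages now surfaces duplicate indexes as a soft error while still returning everything it parsed, instead of printing and carrying on silently. A package-internal handling sketch (the logger and ctx are assumed to be in scope):

    msgs, splitErr := SplitMessages(data)
    if splitErr != nil {
        // either the soft "session has duplicate messages" signal or a hard
        // read failure; parsed messages are returned in both cases
        log.Warn(ctx, "split batch: %s", splitErr)
    }
    // continue processing msgs as before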
@ -1,24 +1,29 @@
package metrics

import (
    "context"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/collectors"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "log"
    "net/http"

    "openreplay/backend/pkg/logger"
)

type MetricServer struct {
    registry *prometheus.Registry
}

func New() *MetricServer {
func New(log logger.Logger, cs []prometheus.Collector) {
    registry := prometheus.NewRegistry()
    // Add go runtime metrics and process collectors.
    registry.MustRegister(
        collectors.NewGoCollector(),
        collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
    )
    // Add extra metrics
    registry.MustRegister(cs...)
    // Expose /metrics HTTP endpoint using the created custom registry.
    http.Handle(
        "/metrics", promhttp.HandlerFor(

@ -28,13 +33,6 @@ func New() *MetricServer {
        }),
    )
    go func() {
        log.Println(http.ListenAndServe(":8888", nil))
        log.Error(context.Background(), "%v", http.ListenAndServe(":8888", nil))
    }()
    return &MetricServer{
        registry: registry,
    }
}

func (s *MetricServer) Register(cs []prometheus.Collector) {
    s.registry.MustRegister(cs...)
}
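Registration is now one-shot: each service hands its collectors to metrics.New instead of calling Register on a returned server. A hedged sketch of the service side (the counter name is illustrative):

    var processedSessions = prometheus.NewCounter(prometheus.CounterOpts{
        Name: "assets_processed_sessions_total", // illustrative metric name
        Help: "Sessions processed by the assets service.",
    })

    func List() []prometheus.Collector {
        return []prometheus.Collector{processedSessions}
    }

    // in main: metrics.New(log, List())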
@ -1,7 +1,5 @@
package projects

import "log"

type Project struct {
    ProjectID  uint32
    ProjectKey string

@ -24,7 +22,6 @@ type Project struct {

func (p *Project) GetMetadataNo(key string) uint {
    if p == nil {
        log.Printf("GetMetadataNo: Project is nil")
        return 0
    }
    if p.Metadata1 != nil && *(p.Metadata1) == key {
@ -1,13 +1,14 @@
package projects

import (
    "context"
    "errors"
    "log"
    "time"

    "openreplay/backend/pkg/cache"
    "openreplay/backend/pkg/db/postgres/pool"
    "openreplay/backend/pkg/db/redis"
    "openreplay/backend/pkg/logger"
)

type Projects interface {

@ -16,15 +17,17 @@ type Projects interface {
}

type projectsImpl struct {
    log            logger.Logger
    db             pool.Pool
    cache          Cache
    projectsByID   cache.Cache
    projectsByKeys cache.Cache
}

func New(db pool.Pool, redis *redis.Client) Projects {
func New(log logger.Logger, db pool.Pool, redis *redis.Client) Projects {
    cl := NewCache(redis)
    return &projectsImpl{
        log:          log,
        db:           db,
        cache:        cl,
        projectsByID: cache.New(time.Minute*5, time.Minute*10),

@ -46,7 +49,8 @@ func (c *projectsImpl) GetProject(projectID uint32) (*Project, error) {
    }
    c.projectsByID.Set(projectID, p)
    if err = c.cache.Set(p); err != nil && !errors.Is(err, ErrDisabledCache) {
        log.Printf("Failed to cache project: %v", err)
        ctx := context.WithValue(context.Background(), "projectID", projectID)
        c.log.Error(ctx, "failed to cache project: %s", err)
    }
    return p, nil
}

@ -65,7 +69,8 @@ func (c *projectsImpl) GetProjectByKey(projectKey string) (*Project, error) {
    }
    c.projectsByKeys.Set(projectKey, p)
    if err := c.cache.Set(p); err != nil && !errors.Is(err, ErrDisabledCache) {
        log.Printf("Failed to cache project: %v", err)
        ctx := context.WithValue(context.Background(), "projectKey", projectKey)
        c.log.Error(ctx, "failed to cache project: %s", err)
    }
    return p, nil
}
@ -3,8 +3,6 @@ package redisstream
import (
    "log"
    "net"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue/types"
    "sort"
    "strconv"
    "strings"

@ -12,6 +10,9 @@ import (

    _redis "github.com/go-redis/redis"
    "github.com/pkg/errors"

    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue/types"
)

type idsInfo struct {

@ -32,7 +33,10 @@ type Consumer struct {
}

func NewConsumer(group string, streams []string, messageIterator messages.MessageIterator) *Consumer {
    redis := getRedisClient()
    redis, err := getRedisClient()
    if err != nil {
        log.Fatalln(err)
    }
    for _, stream := range streams {
        err := redis.XGroupCreateMkStream(stream, group, "0").Err()
        if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
@ -2,6 +2,7 @@ package redisstream

import (
    "github.com/go-redis/redis"
    "log"

    "openreplay/backend/pkg/env"
)

@ -12,8 +13,12 @@ type Producer struct {
}

func NewProducer() *Producer {
    redClient, err := getRedisClient()
    if err != nil {
        log.Fatal(err)
    }
    return &Producer{
        redis:        getRedisClient(),
        redis:        redClient,
        maxLenApprox: int64(env.Uint64("REDIS_STREAMS_MAX_LEN")),
    }
}
@ -1,7 +1,6 @@
package redisstream

import (
    "log"
    "regexp"

    "github.com/go-redis/redis"

@ -11,9 +10,9 @@ import (

var redisClient *redis.Client

func getRedisClient() *redis.Client {
func getRedisClient() (*redis.Client, error) {
    if redisClient != nil {
        return redisClient
        return redisClient, nil
    }

    connectionString := env.String("REDIS_STRING")

@ -25,12 +24,12 @@ func getRedisClient() *redis.Client {

    options, err := redis.ParseURL(connectionString)
    if err != nil {
        log.Fatalln(err)
        return nil, err
    }

    redisClient = redis.NewClient(options)
    if _, err := redisClient.Ping().Result(); err != nil {
        log.Fatalln(err)
        return nil, err
    }
    return redisClient
    return redisClient, nil
}
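getRedisClient keeps its package-level singleton but now propagates failures, letting each caller decide whether they are fatal. If concurrent first use were a concern, the same contract could be guarded with sync.Once; an alternative sketch, not what this commit does:

    var (
        redisOnce sync.Once
        redisErr  error
    )

    func getRedisClientOnce() (*redis.Client, error) {
        redisOnce.Do(func() {
            // buildRedisClient is a hypothetical helper wrapping ParseURL + Ping
            redisClient, redisErr = buildRedisClient()
        })
        return redisClient, redisErr
    }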
@ -1,11 +1,12 @@
package sessions

import (
    "context"
    "errors"
    "log"
    "time"

    "openreplay/backend/pkg/cache"
    "openreplay/backend/pkg/logger"
)

type Cache interface {

@ -18,13 +19,23 @@ type Cache interface {
var ErrSessionNotFound = errors.New("session not found")

type inMemoryCacheImpl struct {
    log      logger.Logger
    sessions cache.Cache
    redis    Cache
}

func NewInMemoryCache(log logger.Logger, redisCache Cache) Cache {
    return &inMemoryCacheImpl{
        log:      log,
        sessions: cache.New(time.Minute*3, time.Minute*10),
        redis:    redisCache,
    }
}

func (i *inMemoryCacheImpl) SetCache(sessID uint64, data map[string]string) error {
    if err := i.redis.SetCache(sessID, data); err != nil && !errors.Is(err, ErrDisabledCache) {
        log.Printf("Failed to cache session: %v", err)
        ctx := context.WithValue(context.Background(), "sessionID", sessID)
        i.log.Warn(ctx, "failed to cache session: %s", err)
    }
    return nil
}

@ -35,7 +46,8 @@ func (i *inMemoryCacheImpl) GetCache(sessID uint64) (map[string]string, error) {
        return session, nil
    }
    if !errors.Is(err, ErrDisabledCache) && err.Error() != "redis: nil" {
        log.Printf("Failed to get session from cache: %v", err)
        ctx := context.WithValue(context.Background(), "sessionID", sessID)
        i.log.Warn(ctx, "failed to get session from cache: %s", err)
    }
    return nil, ErrSessionNotFound
}

@ -43,7 +55,8 @@ func (i *inMemoryCacheImpl) GetCache(sessID uint64) (map[string]string, error) {
func (i *inMemoryCacheImpl) Set(session *Session) error {
    i.sessions.Set(session.SessionID, session)
    if err := i.redis.Set(session); err != nil && !errors.Is(err, ErrDisabledCache) {
        log.Printf("Failed to cache session: %v", err)
        ctx := context.WithValue(context.Background(), "sessionID", session.SessionID)
        i.log.Warn(ctx, "failed to cache session: %s", err)
    }
    return nil
}

@ -58,14 +71,8 @@ func (i *inMemoryCacheImpl) Get(sessionID uint64) (*Session, error) {
        return session, nil
    }
    if !errors.Is(err, ErrDisabledCache) && err.Error() != "redis: nil" {
        log.Printf("Failed to get session from cache: %v", err)
        ctx := context.WithValue(context.Background(), "sessionID", sessionID)
        i.log.Warn(ctx, "failed to get session from cache: %s", err)
    }
    return nil, ErrSessionNotFound
}

func NewInMemoryCache(redisCache Cache) Cache {
    return &inMemoryCacheImpl{
        sessions: cache.New(time.Minute*3, time.Minute*10),
        redis:    redisCache,
    }
}
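Misses surface through the ErrSessionNotFound sentinel, so callers branch with errors.Is rather than string comparison (the "redis: nil" check above remains the one literal exception). A caller sketch:

    session, err := s.cache.Get(sessionID)
    if errors.Is(err, ErrSessionNotFound) {
        // miss: fall back to Postgres via getFromDB
    } else if err != nil {
        // unexpected failure: log with a session-scoped context
    }
    // otherwise use the cached session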
@ -1,10 +1,12 @@
package sessions

import (
    "log"
    "context"
    "fmt"

    "openreplay/backend/pkg/db/postgres/pool"
    "openreplay/backend/pkg/db/redis"
    "openreplay/backend/pkg/logger"
    "openreplay/backend/pkg/projects"
    "openreplay/backend/pkg/url"
)

@ -29,25 +31,28 @@ type Sessions interface {
}

type sessionsImpl struct {
    log      logger.Logger
    cache    Cache
    storage  Storage
    updates  Updates
    projects projects.Projects
}

func New(db pool.Pool, proj projects.Projects, redis *redis.Client) Sessions {
func New(log logger.Logger, db pool.Pool, proj projects.Projects, redis *redis.Client) Sessions {
    return &sessionsImpl{
        cache:    NewInMemoryCache(NewCache(redis)),
        log:      log,
        cache:    NewInMemoryCache(log, NewCache(redis)),
        storage:  NewStorage(db),
        updates:  NewSessionUpdates(db),
        updates:  NewSessionUpdates(log, db),
        projects: proj,
    }
}

// Add usage: /start endpoint in http service
func (s *sessionsImpl) Add(session *Session) error {
    ctx := context.WithValue(context.Background(), "sessionID", session.SessionID)
    if cachedSession, err := s.cache.Get(session.SessionID); err == nil {
        log.Printf("[!] Session %d already exists in cache, new: %+v, cached: %+v", session.SessionID, session, cachedSession)
        s.log.Info(ctx, "[!] Session already exists in cache, new: %+v, cached: %+v", session, cachedSession)
    }
    err := s.storage.Add(session)
    if err != nil {

@ -59,7 +64,7 @@ func (s *sessionsImpl) Add(session *Session) error {
    }
    session.SaveRequestPayload = proj.SaveRequestPayloads
    if err := s.cache.Set(session); err != nil {
        log.Printf("Failed to cache session: %v", err)
        s.log.Warn(ctx, "failed to cache session: %s", err)
    }
    return nil
}

@ -72,8 +77,7 @@ func (s *sessionsImpl) AddUnStarted(sess *UnStartedSession) error {
func (s *sessionsImpl) getFromDB(sessionID uint64) (*Session, error) {
    session, err := s.storage.Get(sessionID)
    if err != nil {
        log.Printf("Failed to get session from postgres: %v", err)
        return nil, err
        return nil, fmt.Errorf("failed to get session from postgres: %s", err)
    }
    proj, err := s.projects.GetProject(session.ProjectID)
    if err != nil {

@ -105,7 +109,8 @@ func (s *sessionsImpl) GetUpdated(sessionID uint64) (*Session, error) {
        return nil, err
    }
    if err := s.cache.Set(session); err != nil {
        log.Printf("Failed to cache session: %v", err)
        ctx := context.WithValue(context.Background(), "sessionID", sessionID)
        s.log.Warn(ctx, "failed to cache session: %s", err)
    }
    return session, nil
}

@ -131,7 +136,8 @@ func (s *sessionsImpl) GetDuration(sessionID uint64) (uint64, error) {
        return 0, err
    }
    if err := s.cache.Set(session); err != nil {
        log.Printf("Failed to cache session: %v", err)
        ctx := context.WithValue(context.Background(), "sessionID", sessionID)
        s.log.Warn(ctx, "failed to cache session: %s", err)
    }
    if session.Duration != nil {
        return *session.Duration, nil

@ -153,30 +159,32 @@ func (s *sessionsImpl) UpdateDuration(sessionID uint64, timestamp uint64) (uint6

    session.Duration = &newDuration
    if err := s.cache.Set(session); err != nil {
        log.Printf("Failed to cache session: %v", err)
        ctx := context.WithValue(context.Background(), "sessionID", sessionID)
        s.log.Warn(ctx, "failed to cache session: %s", err)
    }
    return newDuration, nil
}

// UpdateEncryptionKey usage: in ender to update session encryption key if encryption is enabled
func (s *sessionsImpl) UpdateEncryptionKey(sessionID uint64, key []byte) error {
    ctx := context.WithValue(context.Background(), "sessionID", sessionID)
    if err := s.storage.InsertEncryptionKey(sessionID, key); err != nil {
        return err
    }
    if session, err := s.cache.Get(sessionID); err != nil {
        session.EncryptionKey = string(key)
        if err := s.cache.Set(session); err != nil {
            log.Printf("Failed to cache session: %v", err)
            s.log.Warn(ctx, "failed to cache session: %s", err)
        }
        return nil
    }
    session, err := s.getFromDB(sessionID)
    if err != nil {
        log.Printf("Failed to get session from postgres: %v", err)
        s.log.Error(ctx, "failed to get session from postgres: %s", err)
        return nil
    }
    if err := s.cache.Set(session); err != nil {
        log.Printf("Failed to cache session: %v", err)
        s.log.Warn(ctx, "failed to cache session: %s", err)
    }
    return nil
}
@ -2,8 +2,9 @@ package sessions

import (
    "fmt"

    "github.com/jackc/pgtype"
    "log"

    "openreplay/backend/pkg/db/postgres/pool"
)

@ -134,9 +135,7 @@ func (s *storageImpl) Get(sessionID uint64) (*Session, error) {
    if revID != nil {
        sess.RevID = *revID
    }
    if err := issueTypes.AssignTo(&sess.IssueTypes); err != nil {
        log.Printf("can't scan IssueTypes, err: %s", err)
    }
    issueTypes.AssignTo(&sess.IssueTypes)
    if userState != nil {
        sess.UserState = *userState
    }
@ -1,13 +1,14 @@
package sessions

import (
    "context"
    "fmt"
    "log"
    "time"

    "github.com/jackc/pgx/v4"

    "openreplay/backend/pkg/db/postgres/pool"
    "openreplay/backend/pkg/logger"
    "openreplay/backend/pkg/metrics/database"
)

@ -22,12 +23,14 @@ type Updates interface {
}

type updatesImpl struct {
    log     logger.Logger
    db      pool.Pool
    updates map[uint64]*sessionUpdate
}

func NewSessionUpdates(db pool.Pool) Updates {
func NewSessionUpdates(log logger.Logger, db pool.Pool) Updates {
    return &updatesImpl{
        log:     log,
        db:      db,
        updates: make(map[uint64]*sessionUpdate),
    }

@ -93,19 +96,19 @@ func (u *updatesImpl) Commit() {
    failed := false
    for i := 0; i < l; i++ {
        if _, err := br.Exec(); err != nil {
            log.Printf("Error in PG batch.Exec(): %v \n", err)
            u.log.Error(context.Background(), "error in PG batch.Exec(): %s", err)
            failed = true
            break
        }
    }
    if err := br.Close(); err != nil {
        log.Printf("Error in PG batch.Close(): %v \n", err)
        u.log.Error(context.Background(), "error in PG batch.Close(): %s", err)
    }
    if failed {
        for _, upd := range u.updates {
            if str, args := upd.request(); str != "" {
                if err := u.db.Exec(str, args...); err != nil {
                    log.Printf("Error in PG Exec(): %v \n", err)
                    u.log.Error(context.Background(), "error in PG Exec(): %s", err)
                }
            }
        }
@ -1,10 +1,12 @@
package tags

import (
"log"
"context"
"time"

"openreplay/backend/pkg/cache"
"openreplay/backend/pkg/db/postgres/pool"
"time"
"openreplay/backend/pkg/logger"
)

type Tag struct {

@ -20,12 +22,14 @@ type Tags interface {
}

type tagsImpl struct {
log logger.Logger
db pool.Pool
cache cache.Cache
}

func New(db pool.Pool) Tags {
func New(log logger.Logger, db pool.Pool) Tags {
return &tagsImpl{
log: log,
db: db,
cache: cache.New(time.Minute*5, time.Minute*10),
}

@ -49,7 +53,8 @@ func (t *tagsImpl) Get(projectID uint32) ([]Tag, error) {
)
for rows.Next() {
if err := rows.Scan(&id, &selector, &ignoreClickRage, &ignoreDeadClick); err != nil {
log.Printf("can't scan tag: %s", err)
ctx := context.WithValue(context.Background(), "projectID", projectID)
t.log.Error(ctx, "can't scan tag: %s", err)
continue
}
tags = append(tags, Tag{

@ -76,7 +81,8 @@ func (t *tagsImpl) ShouldIgnoreTag(projectID uint32, selector string) bool {
// Try to load from DB and update cache
tagsData, err = t.Get(projectID)
if err != nil {
log.Printf("can't get tags info: %s", err)
ctx := context.WithValue(context.Background(), "projectID", projectID)
t.log.Error(ctx, "can't get tags info: %s", err)
return false
}
needToUpdateCache = true
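Both new call sites above stash projectID in the context with a plain string key before logging. Assuming logger.New() reads such values back out when building the JSON record, the enrichment side might look roughly like this (a sketch, not the actual pkg/logger code; idiomatic Go usually prefers an unexported typed key over a raw string to avoid collisions, but the diff consistently uses string keys):

package logger

import "context"

// fieldsFromContext pulls the request-scoped values the services attach
// (the "sessionID" and "projectID" keys seen throughout this commit) so
// they can be emitted as JSON fields alongside the message.
func fieldsFromContext(ctx context.Context) map[string]interface{} {
	fields := make(map[string]interface{})
	for _, key := range []string{"sessionID", "projectID"} {
		if v := ctx.Value(key); v != nil {
			fields[key] = v
		}
	}
	return fields
}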
@ -1,10 +1,12 @@
package terminator

import (
"log"
"context"
"os"
"os/signal"
"syscall"

"openreplay/backend/pkg/logger"
)

// ServiceStopper is a common interface for all services

@ -12,11 +14,11 @@ type ServiceStopper interface {
Stop()
}

func Wait(s ServiceStopper) {
func Wait(log logger.Logger, s ServiceStopper) {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM)
sig := <-sigChan
log.Printf("Caught signal %v: terminating\n", sig)
log.Info(context.Background(), "caught signal %v: terminating", sig)
s.Stop()
os.Exit(0)
}
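With Wait taking the logger explicitly, a service main wires shutdown in one call. A minimal usage sketch under the same assumptions as the diff (noopService is hypothetical; any value with a Stop method qualifies):

package main

import (
	"context"

	"openreplay/backend/pkg/logger"
	"openreplay/backend/pkg/terminator"
)

type noopService struct{ log logger.Logger }

func (s *noopService) Stop() { s.log.Info(context.Background(), "stopped") }

func main() {
	log := logger.New()
	log.Info(context.Background(), "service started")
	// Blocks until SIGINT/SIGTERM, logs the signal, calls Stop, then exits.
	terminator.Wait(log, &noopService{log: log})
}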
@ -1,7 +1,6 @@
package assets

import (
"log"
"net/url"
)

@ -9,13 +8,13 @@ type Rewriter struct {
assetsURL *url.URL
}

func NewRewriter(baseOrigin string) *Rewriter {
func NewRewriter(baseOrigin string) (*Rewriter, error) {
assetsURL, err := url.Parse(baseOrigin)
if err != nil {
log.Fatal(err)
return nil, err
}
return &Rewriter{
assetsURL: assetsURL,
}
}, nil

}
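NewRewriter now reports a bad base origin to its caller instead of killing the whole process from inside a library with log.Fatal. Caller side, the decision moves to the service edge; a hedged sketch of what a call site might look like (the cfg.AssetsOrigin field name is illustrative, not taken from this diff):

// In a service main, after log := logger.New() and cfg := config.New(log):
rewriter, err := assets.NewRewriter(cfg.AssetsOrigin) // field name assumed
if err != nil {
	log.Fatal(ctx, "can't init asset rewriter: %s", err)
}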
@ -1,67 +1,67 @@
package main

import (
"log"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/sessions"
"context"

config "openreplay/backend/internal/config/connector"
"openreplay/backend/internal/connector"
saver "openreplay/backend/pkg/connector"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/memory"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/terminator"
)

func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

cfg := config.New()
ctx := context.Background()
log := logger.New()
cfg := config.New(log)

objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
log.Fatalf("can't init object storage: %s", err)
log.Fatal(ctx, "can't init object storage: %s", err)
}

var db saver.Database
switch cfg.ConnectorType {
case "redshift":
if db, err = saver.NewRedshift(cfg, objStore); err != nil {
log.Fatalf("can't init redshift connection: %s", err)
if db, err = saver.NewRedshift(log, cfg, objStore); err != nil {
log.Fatal(ctx, "can't init redshift connection: %s", err)
}
case "clickhouse":
if db, err = saver.NewClickHouse(cfg); err != nil {
log.Fatalf("can't init clickhouse connection: %s", err)
if db, err = saver.NewClickHouse(log, cfg); err != nil {
log.Fatal(ctx, "can't init clickhouse connection: %s", err)
}
default:
log.Fatalf("unknown connector type: %s", cfg.ConnectorType)
log.Fatal(ctx, "unknown connector type: %s", cfg.ConnectorType)
}
defer db.Close()

// Init postgres connection
pgConn, err := pool.New(cfg.Postgres.String())
if err != nil {
log.Printf("can't init postgres connection: %s", err)
return
log.Fatal(ctx, "can't init postgres connection: %s", err)
}
defer pgConn.Close()

// Init redis connection
redisClient, err := redis.New(&cfg.Redis)
if err != nil {
log.Printf("can't init redis connection: %s", err)
log.Warn(ctx, "can't init redis connection: %s", err)
}
defer redisClient.Close()

projManager := projects.New(pgConn, redisClient)
sessManager := sessions.New(pgConn, projManager, redisClient)
projManager := projects.New(log, pgConn, redisClient)
sessManager := sessions.New(log, pgConn, projManager, redisClient)

// Saves messages to Redshift
dataSaver := saver.New(cfg, db, sessManager, projManager)
dataSaver := saver.New(log, cfg, db, sessManager, projManager)

// Message filter
msgFilter := []int{messages.MsgConsoleLog, messages.MsgCustomEvent, messages.MsgJSException,

@ -78,20 +78,19 @@ func main() {
cfg.TopicRawWeb,
cfg.TopicAnalytics,
},
messages.NewMessageIterator(dataSaver.Handle, msgFilter, true),
messages.NewMessageIterator(log, dataSaver.Handle, msgFilter, true),
false,
cfg.MessageSizeLimit,
)

// Init memory manager
memoryManager, err := memory.NewManager(cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
memoryManager, err := memory.NewManager(log, cfg.MemoryLimitMB, cfg.MaxMemoryUsage)
if err != nil {
log.Printf("can't init memory manager: %s", err)
return
log.Fatal(ctx, "can't init memory manager: %s", err)
}

// Run service and wait for TERM signal
service := connector.New(cfg, consumer, dataSaver, memoryManager)
log.Printf("Connector service started\n")
terminator.Wait(service)
log.Info(ctx, "Connector service started")
terminator.Wait(log, service)
}
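A subtlety in the main above: the old log.Printf-plus-return path unwound main and ran the deferred Close calls, whereas a Fatal-style call exits the process immediately (assuming the new logger's Fatal ends in os.Exit like the standard library's), so defer db.Close() and friends are skipped. At init time there is little to leak, but it is worth knowing that Fatal bypasses cleanup; a tiny runnable demonstration:

package main

import (
	"fmt"
	"os"
)

func main() {
	defer fmt.Println("closing connections") // never printed
	fmt.Println("fatal init error")
	os.Exit(1) // Fatal-style exits skip deferred cleanup
}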
@ -2,22 +2,23 @@ package connector

import (
"context"
"log"
"strconv"
"strings"
"time"

"github.com/ClickHouse/clickhouse-go/v2"
"github.com/ClickHouse/clickhouse-go/v2/lib/driver"

"openreplay/backend/internal/config/connector"
"openreplay/backend/pkg/logger"
)

type ClickHouse struct {
log logger.Logger
cfg *connector.Config
conn driver.Conn
}

func NewClickHouse(cfg *connector.Config) (*ClickHouse, error) {
func NewClickHouse(log logger.Logger, cfg *connector.Config) (*ClickHouse, error) {
url := cfg.Clickhouse.URL
url = strings.TrimPrefix(url, "tcp://")
url = strings.TrimSuffix(url, "/default")

@ -43,6 +44,7 @@ func NewClickHouse(cfg *connector.Config) (*ClickHouse, error) {
return nil, err
}
c := &ClickHouse{
log: log,
cfg: cfg,
conn: conn,
}

@ -57,8 +59,10 @@ func (c *ClickHouse) InsertEvents(batch []map[string]string) error {
return err
}
for _, event := range batch {
ctx := context.Background()
ctx = context.WithValue(ctx, "sessionID", c.Uint64(ctx, event["sessionid"]))
if err := bulk.Append(
Uint64(event["sessionid"]),
c.Uint64(ctx, event["sessionid"]),
nullableString(event["consolelog_level"]),
nullableString(event["consolelog_value"]),
nullableString(event["customevent_name"]),

@ -72,11 +76,11 @@ func (c *ClickHouse) InsertEvents(batch []map[string]string) error {
nullableString(event["networkrequest_url"]),
nullableString(event["networkrequest_request"]),
nullableString(event["networkrequest_response"]),
nullableUint64(event["networkrequest_status"]),
nullableUint64(event["networkrequest_timestamp"]),
nullableUint64(event["networkrequest_duration"]),
c.nullableUint64(ctx, event["networkrequest_status"]),
c.nullableUint64(ctx, event["networkrequest_timestamp"]),
c.nullableUint64(ctx, event["networkrequest_duration"]),
nullableString(event["issueevent_message_id"]),
nullableUint64(event["issueevent_timestamp"]),
c.nullableUint64(ctx, event["issueevent_timestamp"]),
nullableString(event["issueevent_type"]),
nullableString(event["issueevent_context_string"]),
nullableString(event["issueevent_context"]),

@ -84,10 +88,10 @@ func (c *ClickHouse) InsertEvents(batch []map[string]string) error {
nullableString(event["issueevent_url"]),
nullableString(event["customissue_name"]),
nullableString(event["customissue_payload"]),
nullableUint64(event["received_at"]),
nullableUint64(event["batch_order_number"]),
c.nullableUint64(ctx, event["received_at"]),
c.nullableUint64(ctx, event["batch_order_number"]),
); err != nil {
log.Printf("can't append value set to batch, err: %s", err)
c.log.Error(ctx, "can't append value set to batch, err: %s", err)
}
}
return bulk.Send()
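The helpers referenced above share one convention: an empty string field means SQL NULL, so they hand the ClickHouse batch a nil pointer rather than a zero value. A self-contained sketch of that conversion (mirroring the idea, not quoting the connector's exact code):

package main

import (
	"fmt"
	"strconv"
)

// nullableUint64 maps "" to nil (inserted as NULL) and any other string
// to a parsed *uint64; parse failures also become nil so one malformed
// field can't poison the batch row.
func nullableUint64(v string) *uint64 {
	if v == "" {
		return nil
	}
	n, err := strconv.ParseUint(v, 10, 64)
	if err != nil {
		return nil
	}
	return &n
}

func main() {
	fmt.Println(nullableUint64(""))    // <nil>, stored as NULL
	fmt.Println(*nullableUint64("42")) // 42
}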
@ -101,42 +105,44 @@ func (c *ClickHouse) InsertSessions(batch []map[string]string) error {
return err
}
for _, sess := range batch {
ctx := context.Background()
ctx = context.WithValue(ctx, "sessionID", c.Uint64(ctx, sess["sessionid"]))
if err := bulk.Append(
Uint64(sess["sessionid"]),
c.Uint64(ctx, sess["sessionid"]),
nullableString(sess["user_agent"]),
nullableString(sess["user_browser"]),
nullableString(sess["user_browser_version"]),
nullableString(sess["user_country"]),
nullableString(sess["user_device"]),
nullableUint64(sess["user_device_heap_size"]),
nullableUint64(sess["user_device_memory_size"]),
c.nullableUint64(ctx, sess["user_device_heap_size"]),
c.nullableUint64(ctx, sess["user_device_memory_size"]),
nullableString(sess["user_device_type"]),
nullableString(sess["user_os"]),
nullableString(sess["user_os_version"]),
nullableString(sess["user_uuid"]),
nullableUint64(sess["connection_effective_bandwidth"]),
c.nullableUint64(ctx, sess["connection_effective_bandwidth"]),
nullableString(sess["connection_type"]),
nullableString(sess["referrer"]),
nullableString(sess["user_anonymous_id"]),
nullableString(sess["user_id"]),
nullableUint64(sess["session_start_timestamp"]),
nullableUint64(sess["session_end_timestamp"]),
nullableUint64(sess["session_duration"]),
nullableUint64(sess["first_contentful_paint"]),
nullableUint64(sess["speed_index"]),
nullableUint64(sess["visually_complete"]),
nullableUint64(sess["timing_time_to_interactive"]),
nullableUint64(sess["avg_cpu"]),
nullableUint64(sess["avg_fps"]),
nullableUint64(sess["max_cpu"]),
nullableUint64(sess["max_fps"]),
nullableUint64(sess["max_total_js_heap_size"]),
nullableUint64(sess["max_used_js_heap_size"]),
nullableUint64(sess["js_exceptions_count"]),
nullableUint64(sess["inputs_count"]),
nullableUint64(sess["clicks_count"]),
nullableUint64(sess["issues_count"]),
nullableUint64(sess["pages_count"]),
c.nullableUint64(ctx, sess["session_start_timestamp"]),
c.nullableUint64(ctx, sess["session_end_timestamp"]),
c.nullableUint64(ctx, sess["session_duration"]),
c.nullableUint64(ctx, sess["first_contentful_paint"]),
c.nullableUint64(ctx, sess["speed_index"]),
c.nullableUint64(ctx, sess["visually_complete"]),
c.nullableUint64(ctx, sess["timing_time_to_interactive"]),
c.nullableUint64(ctx, sess["avg_cpu"]),
c.nullableUint64(ctx, sess["avg_fps"]),
c.nullableUint64(ctx, sess["max_cpu"]),
c.nullableUint64(ctx, sess["max_fps"]),
c.nullableUint64(ctx, sess["max_total_js_heap_size"]),
c.nullableUint64(ctx, sess["max_used_js_heap_size"]),
c.nullableUint64(ctx, sess["js_exceptions_count"]),
c.nullableUint64(ctx, sess["inputs_count"]),
c.nullableUint64(ctx, sess["clicks_count"]),
c.nullableUint64(ctx, sess["issues_count"]),
c.nullableUint64(ctx, sess["pages_count"]),
nullableString(sess["metadata_1"]),
nullableString(sess["metadata_2"]),
nullableString(sess["metadata_3"]),

@ -148,7 +154,7 @@ func (c *ClickHouse) InsertSessions(batch []map[string]string) error {
nullableString(sess["metadata_9"]),
nullableString(sess["metadata_10"]),
); err != nil {
log.Printf("can't append value set to batch, err: %s", err)
c.log.Error(ctx, "can't append value set to batch, err: %s", err)
}
}
return bulk.Send()

@ -158,13 +164,13 @@ func (c *ClickHouse) Close() error {
return c.conn.Close()
}

func Uint64(v string) uint64 {
func (c *ClickHouse) Uint64(ctx context.Context, v string) uint64 {
if v == "" {
return 0
}
res, err := strconv.Atoi(v)
if err != nil {
log.Printf("can't convert string to uint64, err: %s", err)
c.log.Error(ctx, "can't convert string to uint64, err: %s", err)
return 0
}
return uint64(res)

@ -178,12 +184,12 @@ func nullableString(v string) *string {
return p
}

func nullableUint64(v string) *uint64 {
func (c *ClickHouse) nullableUint64(ctx context.Context, v string) *uint64 {
var p *uint64 = nil
if v != "" {
res, err := strconv.Atoi(v)
if err != nil {
log.Printf("can't convert string to uint64, err: %s", err)
c.log.Error(ctx, "can't convert string to uint64, err: %s", err)
return nil
}
a := uint64(res)
@ -5,23 +5,24 @@ import (
"context"
"database/sql"
"fmt"

"github.com/google/uuid"
"log"
"openreplay/backend/pkg/objectstorage"
_ "github.com/lib/pq"

"openreplay/backend/internal/config/connector"

_ "github.com/lib/pq"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
)

type Redshift struct {
log logger.Logger
cfg *connector.Config
ctx context.Context
db *sql.DB
objStorage objectstorage.ObjectStorage
}

func NewRedshift(cfg *connector.Config, objStorage objectstorage.ObjectStorage) (*Redshift, error) {
func NewRedshift(log logger.Logger, cfg *connector.Config, objStorage objectstorage.ObjectStorage) (*Redshift, error) {
var source string
if cfg.ConnectionString != "" {
source = cfg.ConnectionString

@ -29,7 +30,7 @@ func NewRedshift(cfg *connector.Config, objStorage objectstorage.ObjectStorage)
source = fmt.Sprintf("postgres://%s:%s@%s:%d/%s",
cfg.Redshift.User, cfg.Redshift.Password, cfg.Redshift.Host, cfg.Redshift.Port, cfg.Redshift.Database)
}
log.Println("Connecting to Redshift Source: ", source)
log.Info(context.Background(), "Connecting to Redshift Source: %s", source)
sqldb, err := sql.Open("postgres", source)
if err != nil {
return nil, err

@ -38,6 +39,7 @@ func NewRedshift(cfg *connector.Config, objStorage objectstorage.ObjectStorage)
return nil, err
}
return &Redshift{
log: log,
cfg: cfg,
ctx: context.Background(),
db: sqldb,

@ -73,15 +75,13 @@ func (r *Redshift) InsertEvents(batch []map[string]string) error {

reader := bytes.NewReader(buf.Bytes())
if err := r.objStorage.Upload(reader, fileName, "text/csv", objectstorage.NoCompression); err != nil {
log.Printf("can't upload file to s3: %s", err)
return err
return fmt.Errorf("can't upload file to s3: %s", err)
}
// Copy data from s3 bucket to redshift
if err := r.Copy(r.cfg.EventsTableName, fileName, "|", true, false); err != nil {
log.Printf("can't copy data from s3 to redshift: %s", err)
return err
return fmt.Errorf("can't copy data from s3 to redshift: %s", err)
}
log.Printf("events batch of %d events is successfully saved", len(batch))
r.log.Info(context.Background(), "events batch of %d events is successfully saved", len(batch))
return nil
}

@ -113,15 +113,13 @@ func (r *Redshift) InsertSessions(batch []map[string]string) error {

reader := bytes.NewReader(buf.Bytes())
if err := r.objStorage.Upload(reader, fileName, "text/csv", objectstorage.NoCompression); err != nil {
log.Printf("can't upload file to s3: %s", err)
return err
return fmt.Errorf("can't upload file to s3: %s", err)
}
// Copy data from s3 bucket to redshift
if err := r.Copy(r.cfg.SessionsTableName, fileName, "|", true, false); err != nil {
log.Printf("can't copy data from s3 to redshift: %s", err)
return err
return fmt.Errorf("can't copy data from s3 to redshift: %s", err)
}
log.Printf("sessions batch of %d sessions is successfully saved", len(batch))
r.log.Info(context.Background(), "sessions batch of %d sessions is successfully saved", len(batch))
return nil
}

@ -143,7 +141,6 @@ func (r *Redshift) Copy(tableName, fileName, delimiter string, creds, gzip bool)
copySQL := fmt.Sprintf(`COPY "%s" FROM '%s' WITH %s TIMEFORMAT 'auto' DATEFORMAT 'auto' TRUNCATECOLUMNS
STATUPDATE ON %s DELIMITER AS '%s' IGNOREHEADER 1 REMOVEQUOTES ESCAPE TRIMBLANKS EMPTYASNULL ACCEPTANYDATE`,
tableName, filePath, gzipSQL, credentials, delimiter)
log.Printf("Running command: %s", copySQL)

_, err := r.db.ExecContext(r.ctx, copySQL)
return err
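One note on the fmt.Errorf calls above: the %s verb flattens the underlying error into text, so callers can no longer match it with errors.Is or errors.As. If that matters, %w preserves the chain at no extra cost; a small illustration:

package main

import (
	"errors"
	"fmt"
	"io"
)

func upload() error { return io.ErrUnexpectedEOF }

func main() {
	err := fmt.Errorf("can't upload file to s3: %w", upload())
	fmt.Println(errors.Is(err, io.ErrUnexpectedEOF)) // true with %w, false with %s
}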
@ -1,20 +1,22 @@
package connector

import (
"context"
"fmt"
"log"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/sessions"
"strconv"
"time"

config "openreplay/backend/internal/config/connector"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/sessions"
)

// Saver collects sessions and events and saves them to Redshift
type Saver struct {
log logger.Logger
cfg *config.Config
db Database
sessModule sessions.Sessions

@ -26,19 +28,21 @@ type Saver struct {
events []map[string]string
}

func New(cfg *config.Config, db Database, sessions sessions.Sessions, projects projects.Projects) *Saver {
func New(log logger.Logger, cfg *config.Config, db Database, sessions sessions.Sessions, projects projects.Projects) *Saver {
ctx := context.Background()
if cfg == nil {
log.Fatal("connector config is empty")
log.Fatal(ctx, "connector config is empty")
}
// Validate column names in sessions table
if err := validateColumnNames(sessionColumns); err != nil {
log.Printf("can't validate column names: %s", err)
log.Error(ctx, "can't validate sessions column names: %s", err)
}
// Validate column names in events table
if err := validateColumnNames(eventColumns); err != nil {
log.Printf("can't validate column names: %s", err)
log.Error(ctx, "can't validate events column names: %s", err)
}
return &Saver{
log: log,
cfg: cfg,
db: db,
sessModule: sessions,
@ -110,13 +114,14 @@ func (s *Saver) updateSessionInfoFromCache(sessID uint64, sess map[string]string
sess["session_end_timestamp"] = fmt.Sprintf("%d", info.Timestamp+*info.Duration)
}
if sess["session_duration"] == "" && sess["session_start_timestamp"] != "" && sess["session_end_timestamp"] != "" {
ctx := context.WithValue(context.Background(), "sessionID", sessID)
start, err := strconv.Atoi(sess["session_start_timestamp"])
if err != nil {
log.Printf("Error parsing session_start_timestamp: %v", err)
s.log.Error(ctx, "error parsing session_start_timestamp: %s", err)
}
end, err := strconv.Atoi(sess["session_end_timestamp"])
if err != nil {
log.Printf("Error parsing session_end_timestamp: %v", err)
s.log.Error(ctx, "error parsing session_end_timestamp: %s", err)
}
if start != 0 && end != 0 {
sess["session_duration"] = fmt.Sprintf("%d", end-start)

@ -207,12 +212,13 @@ func (s *Saver) handleSession(msg messages.Message) {
if s.sessions == nil {
s.sessions = make(map[uint64]map[string]string)
}
ctx := context.WithValue(context.Background(), "sessionID", msg.SessionID())
sess, ok := s.sessions[msg.SessionID()]
if !ok {
// Try to load session from cache
cached, err := s.sessModule.GetCached(msg.SessionID())
if err != nil && err != sessions.ErrSessionNotFound {
log.Printf("Failed to get cached session: %v", err)
s.log.Warn(ctx, "failed to get cached session: %s", err)
}
if cached != nil {
sess = cached

@ -251,7 +257,7 @@ func (s *Saver) handleSession(msg messages.Message) {
case *messages.SessionEnd:
sess["session_end_timestamp"] = fmt.Sprintf("%d", m.Timestamp)
if err := s.updateSessionInfoFromCache(msg.SessionID(), sess); err != nil {
log.Printf("Error updating session info from cache: %v", err)
s.log.Warn(ctx, "failed to update session info from cache: %s", err)
}
case *messages.ConnectionInformation:
sess["connection_effective_bandwidth"] = fmt.Sprintf("%d", m.Downlink)
@ -259,12 +265,12 @@ func (s *Saver) handleSession(msg messages.Message) {
case *messages.Metadata:
session, err := s.sessModule.Get(msg.SessionID())
if err != nil {
log.Printf("Error getting session info: %v", err)
s.log.Error(ctx, "error getting session info: %s", err)
break
}
project, err := s.projModule.GetProject(session.ProjectID)
if err != nil {
log.Printf("Error getting project info: %v", err)
s.log.Error(ctx, "error getting project info: %s", err)
break
}
keyNo := project.GetMetadataNo(m.Key)

@ -353,18 +359,18 @@ func (s *Saver) Handle(msg messages.Message) {

func (s *Saver) commitEvents() {
if len(s.events) == 0 {
log.Printf("empty events batch")
s.log.Info(context.Background(), "empty events batch")
return
}
if err := s.db.InsertEvents(s.events); err != nil {
log.Printf("can't insert events: %s", err)
s.log.Error(context.Background(), "can't insert events: %s", err)
}
s.events = nil
}

func (s *Saver) commitSessions() {
if len(s.finishedSessions) == 0 {
log.Printf("empty sessions batch")
s.log.Info(context.Background(), "empty sessions batch")
return
}
l := len(s.finishedSessions)

@ -381,9 +387,9 @@ func (s *Saver) commitSessions() {
}
}
if err := s.db.InsertSessions(sessions); err != nil {
log.Printf("can't insert sessions: %s", err)
s.log.Error(context.Background(), "can't insert sessions: %s", err)
}
log.Printf("finished: %d, to keep: %d, to send: %d", l, len(toKeep), len(toSend))
s.log.Info(context.Background(), "finished: %d, to keep: %d, to send: %d", l, len(toKeep), len(toSend))
// Clear current list of finished sessions
for _, sessionID := range toSend {
delete(s.sessions, sessionID) // delete session info
@ -398,10 +404,11 @@ func (s *Saver) Commit() {
start := time.Now()
for sessionID, _ := range s.updatedSessions {
if err := s.sessModule.AddCached(sessionID, s.sessions[sessionID]); err != nil {
log.Printf("Error adding session to cache: %v", err)
ctx := context.WithValue(context.Background(), "sessionID", sessionID)
s.log.Error(ctx, "can't add session to cache: %s", err)
}
}
log.Printf("Cached %d sessions in %s", len(s.updatedSessions), time.Since(start))
s.log.Info(context.Background(), "cached %d sessions in %s", len(s.updatedSessions), time.Since(start))
s.updatedSessions = nil
// Commit events and sessions (send to Redshift)
s.commitEvents()

@ -428,12 +435,13 @@ func (s *Saver) checkZombieSessions() {
// Do that several times (up to the save-attempts limit); after the last attempt, delete the session from memory to avoid keeping sessions with unfilled fields
zombieSession := s.sessions[sessionID]
if zombieSession["session_start_timestamp"] == "" || zombieSession["session_end_timestamp"] == "" {
ctx := context.WithValue(context.Background(), "sessionID", sessionID)
// Let's try to load session from cache
if err := s.updateSessionInfoFromCache(sessionID, zombieSession); err != nil {
log.Printf("Error updating zombie session info from cache: %v", err)
s.log.Warn(ctx, "failed to update zombie session info from cache: %s", err)
} else {
s.sessions[sessionID] = zombieSession
log.Printf("Updated zombie session info from cache: %v", zombieSession)
s.log.Info(ctx, "updated zombie session info from cache: %v", zombieSession)
}
}
if zombieSession["session_start_timestamp"] == "" || zombieSession["session_end_timestamp"] == "" {

@ -445,7 +453,7 @@ func (s *Saver) checkZombieSessions() {
}
}
if zombieSessionsCount > 0 {
log.Printf("Found %d zombie sessions", zombieSessionsCount)
s.log.Info(context.Background(), "found %d zombie sessions", zombieSessionsCount)
}
}