Observability upgrade (#3146)

* feat(metrics): grand update

* feat(metrics): fixed missing part in ee tracer

* feat(assets): added missing arg

* feat(metrics): fixed naming problems
Alexander, 2025-03-13 08:09:29 +01:00, committed by GitHub
parent fe1130397c
commit 3b3e95a413
62 changed files with 1901 additions and 1619 deletions
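Across the changed files the commit follows one pattern: package-level metric helpers are replaced by per-service metric groups that are created with the service name, registered once with the shared registry, and then injected into constructors. Below is a minimal sketch of that wiring, mirroring the cmd/*/main.go diffs that follow; the service name "example" and the connection string are placeholders, not values from this commit.

package main

import (
	"context"

	"openreplay/backend/pkg/db/postgres/pool"
	"openreplay/backend/pkg/logger"
	"openreplay/backend/pkg/metrics"
	"openreplay/backend/pkg/metrics/database"
	"openreplay/backend/pkg/metrics/web"
)

func main() {
	ctx := context.Background()
	log := logger.New()
	// Per-service metric groups, labeled with the service name.
	webMetrics := web.New("example")     // "example" stands in for the real service name
	dbMetrics := database.New("example")
	// Register every collector once with the shared registry.
	metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
	// Dependencies now receive the metrics instance explicitly instead of
	// calling package-level functions.
	pgConn, err := pool.New(dbMetrics, "postgres://...") // connection string elided
	if err != nil {
		log.Fatal(ctx, "can't init postgres connection: %s", err)
	}
	defer pgConn.Close()
}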


@@ -8,8 +8,7 @@ import (
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/metrics"
-	analyticsMetrics "openreplay/backend/pkg/metrics/analytics"
-	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	"openreplay/backend/pkg/metrics/database"
 	"openreplay/backend/pkg/metrics/web"
 	"openreplay/backend/pkg/server"
 	"openreplay/backend/pkg/server/api"
@@ -19,16 +18,18 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := analyticsConfig.New(log)
+	// Observability
 	webMetrics := web.New("analytics")
-	metrics.New(log, append(webMetrics.List(), append(analyticsMetrics.List(), databaseMetrics.List()...)...))
+	dbMetrics := database.New("analytics")
+	metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
-	pgConn, err := pool.New(cfg.Postgres.String())
+	pgConn, err := pool.New(dbMetrics, cfg.Postgres.String())
 	if err != nil {
 		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close()
-	builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, pgConn)
+	builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
 	if err != nil {
 		log.Fatal(ctx, "can't init services: %s", err)
 	}


@@ -22,13 +22,15 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := config.New(log)
-	metrics.New(log, assetsMetrics.List())
+	// Observability
+	assetMetrics := assetsMetrics.New("assets")
+	metrics.New(log, assetMetrics.List())
 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
 		log.Fatal(ctx, "can't init object storage: %s", err)
 	}
-	cacher, err := cacher.NewCacher(cfg, objStore)
+	cacher, err := cacher.NewCacher(cfg, objStore, assetMetrics)
 	if err != nil {
 		log.Fatal(ctx, "can't init cacher: %s", err)
 	}
@@ -37,7 +39,7 @@ func main() {
 		switch m := msg.(type) {
 		case *messages.AssetCache:
 			cacher.CacheURL(m.SessionID(), m.URL)
-			assetsMetrics.IncreaseProcessesSessions()
+			assetMetrics.IncreaseProcessesSessions()
 		case *messages.JSException:
 			sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
 			if err != nil {


@@ -22,6 +22,7 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := config.New(log)
+	// Observability
 	canvasMetrics := canvasesMetrics.New("canvases")
 	metrics.New(log, canvasMetrics.List())


@@ -14,7 +14,7 @@ import (
 	"openreplay/backend/pkg/memory"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
-	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	"openreplay/backend/pkg/metrics/database"
 	"openreplay/backend/pkg/projects"
 	"openreplay/backend/pkg/queue"
 	"openreplay/backend/pkg/sessions"
@@ -26,22 +26,24 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := config.New(log)
-	metrics.New(log, databaseMetrics.List())
+	// Observability
+	dbMetric := database.New("db")
+	metrics.New(log, dbMetric.List())
-	pgConn, err := pool.New(cfg.Postgres.String())
+	pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
 	if err != nil {
 		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close()
-	chConn := clickhouse.NewConnector(cfg.Clickhouse)
+	chConn := clickhouse.NewConnector(cfg.Clickhouse, dbMetric)
 	if err := chConn.Prepare(); err != nil {
 		log.Fatal(ctx, "can't prepare clickhouse: %s", err)
 	}
 	defer chConn.Stop()
 	// Init db proxy module (postgres + clickhouse + batches)
-	dbProxy := postgres.NewConn(log, pgConn, chConn)
+	dbProxy := postgres.NewConn(log, pgConn, chConn, dbMetric)
 	defer dbProxy.Close()
 	// Init redis connection
@@ -51,8 +53,8 @@ func main() {
 	}
 	defer redisClient.Close()
-	projManager := projects.New(log, pgConn, redisClient)
+	projManager := projects.New(log, pgConn, redisClient, dbMetric)
-	sessManager := sessions.New(log, pgConn, projManager, redisClient)
+	sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)
 	tagsManager := tags.New(log, pgConn)
 	// Init data saver


@@ -19,7 +19,7 @@ import (
 	"openreplay/backend/pkg/memory"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
-	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	"openreplay/backend/pkg/metrics/database"
 	enderMetrics "openreplay/backend/pkg/metrics/ender"
 	"openreplay/backend/pkg/projects"
 	"openreplay/backend/pkg/queue"
@@ -31,9 +31,12 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := ender.New(log)
-	metrics.New(log, append(enderMetrics.List(), databaseMetrics.List()...))
+	// Observability
+	dbMetric := database.New("ender")
+	enderMetric := enderMetrics.New("ender")
+	metrics.New(log, append(enderMetric.List(), dbMetric.List()...))
-	pgConn, err := pool.New(cfg.Postgres.String())
+	pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
 	if err != nil {
 		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
@@ -45,10 +48,10 @@ func main() {
 	}
 	defer redisClient.Close()
-	projManager := projects.New(log, pgConn, redisClient)
+	projManager := projects.New(log, pgConn, redisClient, dbMetric)
-	sessManager := sessions.New(log, pgConn, projManager, redisClient)
+	sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)
-	sessionEndGenerator, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
+	sessionEndGenerator, err := sessionender.New(enderMetric, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
 	if err != nil {
 		log.Fatal(ctx, "can't init ender service: %s", err)
 	}


@@ -23,7 +23,9 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := config.New(log)
-	metrics.New(log, heuristicsMetrics.List())
+	// Observability
+	heuristicsMetric := heuristicsMetrics.New("heuristics")
+	metrics.New(log, heuristicsMetric.List())
 	// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
 	handlersFabric := func() []handlers.MessageProcessor {
@@ -62,7 +64,7 @@ func main() {
 	}
 	// Run service and wait for TERM signal
-	service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager)
+	service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager, heuristicsMetric)
 	log.Info(ctx, "Heuristics service started")
 	terminator.Wait(log, service)
 }


@@ -9,7 +9,7 @@ import (
 	"openreplay/backend/pkg/db/redis"
 	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/metrics"
-	databaseMetrics "openreplay/backend/pkg/metrics/database"
+	"openreplay/backend/pkg/metrics/database"
 	"openreplay/backend/pkg/metrics/web"
 	"openreplay/backend/pkg/queue"
 	"openreplay/backend/pkg/server"
@@ -20,13 +20,15 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := http.New(log)
+	// Observability
 	webMetrics := web.New("http")
-	metrics.New(log, append(webMetrics.List(), databaseMetrics.List()...))
+	dbMetric := database.New("http")
+	metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
 	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
 	defer producer.Close(15000)
-	pgConn, err := pool.New(cfg.Postgres.String())
+	pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
 	if err != nil {
 		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
@@ -38,7 +40,7 @@ func main() {
 	}
 	defer redisClient.Close()
-	builder, err := services.New(log, cfg, webMetrics, producer, pgConn, redisClient)
+	builder, err := services.New(log, cfg, webMetrics, dbMetric, producer, pgConn, redisClient)
 	if err != nil {
 		log.Fatal(ctx, "failed while creating services: %s", err)
 	}


@@ -23,6 +23,7 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := config.New(log)
+	// Observability
 	imageMetrics := imagesMetrics.New("images")
 	metrics.New(log, imageMetrics.List())


@@ -18,16 +18,18 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := config.New(log)
+	// Observability
 	webMetrics := web.New("integrations")
-	metrics.New(log, append(webMetrics.List(), database.List()...))
+	dbMetric := database.New("integrations")
+	metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
-	pgConn, err := pool.New(cfg.Postgres.String())
+	pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
 	if err != nil {
 		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close()
-	builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, pgConn)
+	builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, dbMetric, pgConn)
 	if err != nil {
 		log.Fatal(ctx, "can't init services: %s", err)
 	}


@@ -9,14 +9,14 @@ import (
 	"syscall"
 	"time"

-	"openreplay/backend/internal/config/sink"
+	config "openreplay/backend/internal/config/sink"
 	"openreplay/backend/internal/sink/assetscache"
 	"openreplay/backend/internal/sink/sessionwriter"
 	"openreplay/backend/internal/storage"
 	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/messages"
 	"openreplay/backend/pkg/metrics"
-	sinkMetrics "openreplay/backend/pkg/metrics/sink"
+	"openreplay/backend/pkg/metrics/sink"
 	"openreplay/backend/pkg/queue"
 	"openreplay/backend/pkg/url/assets"
 )
@@ -24,7 +24,9 @@ import (
 func main() {
 	ctx := context.Background()
 	log := logger.New()
-	cfg := sink.New(log)
+	cfg := config.New(log)
+	// Observability
+	sinkMetrics := sink.New("sink")
 	metrics.New(log, sinkMetrics.List())
 	if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
@@ -39,7 +41,7 @@ func main() {
 	if err != nil {
 		log.Fatal(ctx, "can't init rewriter: %s", err)
 	}
-	assetMessageHandler := assetscache.New(log, cfg, rewriter, producer)
+	assetMessageHandler := assetscache.New(log, cfg, rewriter, producer, sinkMetrics)
 	counter := storage.NewLogCounter()
 	var (
@@ -191,7 +193,7 @@ func main() {
 			cfg.TopicRawWeb,
 			cfg.TopicRawMobile,
 		},
-		messages.NewSinkMessageIterator(log, msgHandler, nil, false),
+		messages.NewSinkMessageIterator(log, msgHandler, nil, false, sinkMetrics),
 		false,
 		cfg.MessageSizeLimit,
 	)


@@ -19,17 +19,20 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := spotConfig.New(log)
+	// Observability
 	webMetrics := web.New("spot")
-	metrics.New(log, append(webMetrics.List(), append(spotMetrics.List(), databaseMetrics.List()...)...))
+	spotMetric := spotMetrics.New("spot")
+	dbMetric := databaseMetrics.New("spot")
+	metrics.New(log, append(webMetrics.List(), append(spotMetric.List(), dbMetric.List()...)...))
-	pgConn, err := pool.New(cfg.Postgres.String())
+	pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
 	if err != nil {
 		log.Fatal(ctx, "can't init postgres connection: %s", err)
 	}
 	defer pgConn.Close()
 	prefix := api.NoPrefix
-	builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, pgConn, prefix)
+	builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, spotMetric, dbMetric, pgConn, prefix)
 	if err != nil {
 		log.Fatal(ctx, "can't init services: %s", err)
 	}


@@ -23,13 +23,15 @@ func main() {
 	ctx := context.Background()
 	log := logger.New()
 	cfg := config.New(log)
-	metrics.New(log, storageMetrics.List())
+	// Observability
+	storageMetric := storageMetrics.New("storage")
+	metrics.New(log, storageMetric.List())
 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
 		log.Fatal(ctx, "can't init object storage: %s", err)
 	}
-	srv, err := storage.New(cfg, log, objStore)
+	srv, err := storage.New(cfg, log, objStore, storageMetric)
 	if err != nil {
 		log.Fatal(ctx, "can't init storage service: %s", err)
 	}


@@ -27,6 +27,7 @@ type cacher struct {
 	objStorage objectstorage.ObjectStorage // AWS Docs: "These clients are safe to use concurrently."
 	httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
 	rewriter *assets.Rewriter // Read only
+	metrics metrics.Assets
 	Errors chan error
 	sizeLimit int
 	requestHeaders map[string]string
@@ -37,7 +38,7 @@ func (c *cacher) CanCache() bool {
 	return c.workers.CanAddTask()
 }

-func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {
+func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage, metrics metrics.Assets) (*cacher, error) {
 	switch {
 	case cfg == nil:
 		return nil, errors.New("config is nil")
@@ -93,6 +94,7 @@ func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher,
 		Errors: make(chan error),
 		sizeLimit: cfg.AssetsSizeLimit,
 		requestHeaders: cfg.AssetsRequestHeaders,
+		metrics: metrics,
 	}
 	c.workers = NewPool(64, c.CacheFile)
 	return c, nil
@@ -115,7 +117,7 @@ func (c *cacher) cacheURL(t *Task) {
 		c.Errors <- errors.Wrap(err, t.urlContext)
 		return
 	}
-	metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
+	c.metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
 	defer res.Body.Close()
 	if res.StatusCode >= 400 {
 		printErr := true
@@ -162,12 +164,12 @@ func (c *cacher) cacheURL(t *Task) {
 	start = time.Now()
 	err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression)
 	if err != nil {
-		metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
+		c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
 		c.Errors <- errors.Wrap(err, t.urlContext)
 		return
 	}
-	metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
-	metrics.IncreaseSavedSessions()
+	c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
+	c.metrics.IncreaseSavedSessions()
 	if isCSS {
 		if t.depth > 0 {
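The cacher now records through an injected interface instead of package-level functions. The following is a sketch of what the metrics.Assets contract appears to be, inferred only from the call sites in this diff; the parameter names and any methods beyond these four are assumptions.

// Inferred from call sites; the real metrics.Assets interface may differ.
type Assets interface {
	IncreaseProcessesSessions()                        // cmd/assets: one per processed AssetCache message
	RecordDownloadDuration(durMs float64, code int)    // asset download timing, labeled by HTTP status
	RecordUploadDuration(durMs float64, isFailed bool) // object-storage upload timing, labeled by outcome
	IncreaseSavedSessions()                            // successful uploads
}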


@@ -11,7 +11,7 @@ import (
 	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/memory"
 	"openreplay/backend/pkg/messages"
-	metrics "openreplay/backend/pkg/metrics/heuristics"
+	heuristicMetrics "openreplay/backend/pkg/metrics/heuristics"
 	"openreplay/backend/pkg/queue/types"
 )
@@ -23,11 +23,12 @@ type heuristicsImpl struct {
 	consumer types.Consumer
 	events builders.EventBuilder
 	mm memory.Manager
+	metrics heuristicMetrics.Heuristics
 	done chan struct{}
 	finished chan struct{}
 }

-func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager) service.Interface {
+func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager, metrics heuristicMetrics.Heuristics) service.Interface {
 	s := &heuristicsImpl{
 		log: log,
 		ctx: context.Background(),
@@ -36,6 +37,7 @@ func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Co
 		consumer: c,
 		events: e,
 		mm: mm,
+		metrics: metrics,
 		done: make(chan struct{}),
 		finished: make(chan struct{}),
 	}
@@ -51,7 +53,7 @@ func (h *heuristicsImpl) run() {
 			if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
 				h.log.Error(h.ctx, "can't send new event to queue: %s", err)
 			} else {
-				metrics.IncreaseTotalEvents(messageTypeName(evt))
+				h.metrics.IncreaseTotalEvents(messageTypeName(evt))
 			}
 		case <-tick:
 			h.producer.Flush(h.cfg.ProducerTimeout)


@@ -12,6 +12,7 @@ import (
 	featureflagsAPI "openreplay/backend/pkg/featureflags/api"
 	"openreplay/backend/pkg/flakeid"
 	"openreplay/backend/pkg/logger"
+	"openreplay/backend/pkg/metrics/database"
 	"openreplay/backend/pkg/metrics/web"
 	"openreplay/backend/pkg/objectstorage/store"
 	"openreplay/backend/pkg/projects"
@@ -36,8 +37,8 @@ type ServicesBuilder struct {
 	UxTestsAPI api.Handlers
 }

-func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
+func New(log logger.Logger, cfg *http.Config, webMetrics web.Web, dbMetrics database.Database, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
-	projs := projects.New(log, pgconn, redis)
+	projs := projects.New(log, pgconn, redis, dbMetrics)
 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
 		return nil, err
@@ -53,11 +54,11 @@ func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Pr
 	tokenizer := token.NewTokenizer(cfg.TokenSecret)
 	conditions := conditions.New(pgconn)
 	flaker := flakeid.NewFlaker(cfg.WorkerID)
-	sessions := sessions.New(log, pgconn, projs, redis)
+	sessions := sessions.New(log, pgconn, projs, redis, dbMetrics)
 	featureFlags := featureflags.New(pgconn)
 	tags := tags.New(log, pgconn)
 	uxTesting := uxtesting.New(pgconn)
-	responser := api.NewResponser(metrics)
+	responser := api.NewResponser(webMetrics)
 	builder := &ServicesBuilder{}
 	if builder.WebAPI, err = websessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil {
 		return nil, err


@@ -21,6 +21,7 @@ type session struct {

 // SessionEnder updates timestamp of last message for each session
 type SessionEnder struct {
+	metrics ender.Ender
 	timeout int64
 	sessions map[uint64]*session // map[sessionID]session
 	timeCtrl *timeController
@@ -28,8 +29,8 @@ type SessionEnder struct {
 	enabled bool
 }

-func New(timeout int64, parts int) (*SessionEnder, error) {
+func New(metrics ender.Ender, timeout int64, parts int) (*SessionEnder, error) {
 	return &SessionEnder{
+		metrics: metrics,
 		timeout: timeout,
 		sessions: make(map[uint64]*session),
 		timeCtrl: NewTimeController(parts),
@@ -56,7 +58,7 @@ func (se *SessionEnder) ActivePartitions(parts []uint64) {
 	for sessID, _ := range se.sessions {
 		if !activeParts[sessID%se.parts] {
 			delete(se.sessions, sessID)
-			ender.DecreaseActiveSessions()
+			se.metrics.DecreaseActiveSessions()
 			removedSessions++
 		} else {
 			activeSessions++
@@ -89,8 +91,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
 			isEnded: false,
 			isMobile: messages.IsMobileType(msg.TypeID()),
 		}
-		ender.IncreaseActiveSessions()
-		ender.IncreaseTotalSessions()
+		se.metrics.IncreaseActiveSessions()
+		se.metrics.IncreaseTotalSessions()
 		return
 	}
 	// Keep the highest user's timestamp for correct session duration value
@@ -139,8 +141,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
 			sess.isEnded = true
 			if res, _ := handler(sessID, sess.lastUserTime); res {
 				delete(se.sessions, sessID)
-				ender.DecreaseActiveSessions()
-				ender.IncreaseClosedSessions()
+				se.metrics.DecreaseActiveSessions()
+				se.metrics.IncreaseClosedSessions()
 				removedSessions++
 				if endCase == 2 {
 					brokerTime[1]++


@@ -12,7 +12,7 @@ import (
 	"openreplay/backend/internal/config/sink"
 	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/messages"
-	metrics "openreplay/backend/pkg/metrics/sink"
+	sinkMetrics "openreplay/backend/pkg/metrics/sink"
 	"openreplay/backend/pkg/queue/types"
 	"openreplay/backend/pkg/url/assets"
 )
@@ -30,9 +30,10 @@ type AssetsCache struct {
 	producer types.Producer
 	cache map[string]*CachedAsset
 	blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
+	metrics sinkMetrics.Sink
 }

-func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
+func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics sinkMetrics.Sink) *AssetsCache {
 	assetsCache := &AssetsCache{
 		log: log,
 		cfg: cfg,
@@ -40,6 +41,7 @@ func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, produce
 		producer: producer,
 		cache: make(map[string]*CachedAsset, 64),
 		blackList: make([]string, 0),
+		metrics: metrics,
 	}
 	// Parse black list for cache layer
 	if len(cfg.CacheBlackList) > 0 {
@@ -76,7 +78,7 @@ func (e *AssetsCache) clearCache() {
 		if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
 			deleted++
 			delete(e.cache, id)
-			metrics.DecreaseCachedAssets()
+			e.metrics.DecreaseCachedAssets()
 		}
 	}
 	e.log.Info(context.Background(), "cache cleaner: deleted %d/%d assets", deleted, cacheSize)
@@ -194,7 +196,7 @@ func parseHost(baseURL string) (string, error) {
 }

 func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
-	metrics.IncreaseTotalAssets()
+	e.metrics.IncreaseTotalAssets()
 	// Try to find asset in cache
 	h := md5.New()
 	// Cut first part of url (scheme + host)
@@ -217,7 +219,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
 	e.mutex.RUnlock()
 	if ok {
 		if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
-			metrics.IncreaseSkippedAssets()
+			e.metrics.IncreaseSkippedAssets()
 			return cachedAsset.msg
 		}
 	}
@@ -229,8 +231,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
 	start := time.Now()
 	res := e.getRewrittenCSS(sessionID, baseURL, css)
 	duration := time.Now().Sub(start).Milliseconds()
-	metrics.RecordAssetSize(float64(len(res)))
-	metrics.RecordProcessAssetDuration(float64(duration))
+	e.metrics.RecordAssetSize(float64(len(res)))
+	e.metrics.RecordProcessAssetDuration(float64(duration))
 	// Save asset to cache if we spent more than threshold
 	if duration > e.cfg.CacheThreshold {
 		e.mutex.Lock()
@@ -239,7 +241,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
 			ts: time.Now(),
 		}
 		e.mutex.Unlock()
-		metrics.IncreaseCachedAssets()
+		e.metrics.IncreaseCachedAssets()
 	}
 	// Return rewritten asset
 	return res


@@ -18,7 +18,7 @@ import (
 	config "openreplay/backend/internal/config/storage"
 	"openreplay/backend/pkg/logger"
 	"openreplay/backend/pkg/messages"
-	metrics "openreplay/backend/pkg/metrics/storage"
+	storageMetrics "openreplay/backend/pkg/metrics/storage"
 	"openreplay/backend/pkg/objectstorage"
 	"openreplay/backend/pkg/pool"
 )
@@ -77,9 +77,10 @@ type Storage struct {
 	splitTime uint64
 	processorPool pool.WorkerPool
 	uploaderPool pool.WorkerPool
+	metrics storageMetrics.Storage
 }

-func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*Storage, error) {
+func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, metrics storageMetrics.Storage) (*Storage, error) {
 	switch {
 	case cfg == nil:
 		return nil, fmt.Errorf("config is empty")
@@ -92,6 +93,7 @@ func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectS
 		objStorage: objStorage,
 		startBytes: make([]byte, cfg.FileSplitSize),
 		splitTime: parseSplitTime(cfg.FileSplitTime),
+		metrics: metrics,
 	}
 	s.processorPool = pool.NewPool(1, 1, s.doCompression)
 	s.uploaderPool = pool.NewPool(1, 1, s.uploadSession)
@@ -141,7 +143,7 @@ func (s *Storage) Process(ctx context.Context, msg *messages.SessionEnd) (err er
 	if err != nil {
 		if strings.Contains(err.Error(), "big file") {
 			s.log.Warn(ctx, "can't process session: %s", err)
-			metrics.IncreaseStorageTotalSkippedSessions()
+			s.metrics.IncreaseStorageTotalSkippedSessions()
 			return nil
 		}
 		return err
@@ -159,8 +161,8 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
 		return err
 	}
-	metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
-	metrics.RecordSessionSize(float64(len(mob)), tp.String())
+	s.metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
+	s.metrics.RecordSessionSize(float64(len(mob)), tp.String())
 	// Put opened session file into task struct
 	task.SetMob(mob, index, tp)
@@ -174,7 +176,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
 	// Check file size before download into memory
 	info, err := os.Stat(filePath)
 	if err == nil && info.Size() > s.cfg.MaxFileSize {
-		metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
+		s.metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
 		return nil, -1, fmt.Errorf("big file, size: %d", info.Size())
 	}
 	// Read file into memory
@@ -190,7 +192,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
 	if err != nil {
 		return nil, -1, fmt.Errorf("can't sort session, err: %s", err)
 	}
-	metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+	s.metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
 	return mob, index, nil
 }
@@ -234,12 +236,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
 		// Compression
 		start := time.Now()
 		data := s.compress(task.ctx, mob, task.compression)
-		metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+		s.metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
 		// Encryption
 		start = time.Now()
 		result := s.encryptSession(task.ctx, data.Bytes(), task.key)
-		metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+		s.metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
 		if tp == DOM {
 			task.doms = bytes.NewBuffer(result)
@@ -296,8 +298,8 @@ func (s *Storage) packSession(task *Task, tp FileType) {
 	wg.Wait()
 	// Record metrics
-	metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
-	metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
+	s.metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
+	s.metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
 }

 func (s *Storage) encryptSession(ctx context.Context, data []byte, encryptionKey string) []byte {
@@ -382,7 +384,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 	go func() {
 		if task.doms != nil {
 			// Record compression ratio
-			metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
+			s.metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
 			// Upload session to s3
 			start := time.Now()
 			if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -395,7 +397,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 	go func() {
 		if task.dome != nil {
 			// Record compression ratio
-			metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
+			s.metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
 			// Upload session to s3
 			start := time.Now()
 			if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -408,7 +410,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 	go func() {
 		if task.dev != nil {
 			// Record compression ratio
-			metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
+			s.metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
 			// Upload session to s3
 			start := time.Now()
 			if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -419,9 +421,9 @@ func (s *Storage) uploadSession(payload interface{}) {
 		wg.Done()
 	}()
 	wg.Wait()
-	metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
-	metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
-	metrics.IncreaseStorageTotalSessions()
+	s.metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
+	s.metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
+	s.metrics.IncreaseStorageTotalSessions()
 }

 func (s *Storage) doCompression(payload interface{}) {
} }
func (s *Storage) doCompression(payload interface{}) { func (s *Storage) doCompression(payload interface{}) {


@@ -3,6 +3,7 @@ package analytics
 import (
 	"github.com/go-playground/validator/v10"
 	"openreplay/backend/pkg/analytics/charts"
+	"openreplay/backend/pkg/metrics/database"
 	"time"

 	"openreplay/backend/internal/config/analytics"
@@ -26,9 +27,9 @@ type ServicesBuilder struct {
 	ChartsAPI api.Handlers
 }

-func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, pgconn pool.Pool) (*ServicesBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServicesBuilder, error) {
 	responser := api.NewResponser(webMetrics)
-	audiTrail, err := tracer.NewTracer(log, pgconn)
+	audiTrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
 	if err != nil {
 		return nil, err
 	}


@@ -18,13 +18,14 @@ type Bulk interface {
 }

 type bulkImpl struct {
 	conn driver.Conn
-	table string
-	query string
-	values [][]interface{}
+	metrics database.Database
+	table string
+	query string
+	values [][]interface{}
 }

-func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
+func NewBulk(conn driver.Conn, metrics database.Database, table, query string) (Bulk, error) {
 	switch {
 	case conn == nil:
 		return nil, errors.New("clickhouse connection is empty")
@@ -34,10 +35,11 @@ func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
 		return nil, errors.New("query is empty")
 	}
 	return &bulkImpl{
 		conn: conn,
-		table: table,
-		query: query,
-		values: make([][]interface{}, 0),
+		metrics: metrics,
+		table: table,
+		query: query,
+		values: make([][]interface{}, 0),
 	}, nil
 }
@@ -60,8 +62,8 @@ func (b *bulkImpl) Send() error {
 	}
 	err = batch.Send()
 	// Save bulk metrics
-	database.RecordBulkElements(float64(len(b.values)), "ch", b.table)
-	database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
+	b.metrics.RecordBulkElements(float64(len(b.values)), "ch", b.table)
+	b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
 	// Prepare values slice for a new data
 	b.values = make([][]interface{}, 0)
 	return err


@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"hash/fnv"
 	"log"
+	"openreplay/backend/pkg/metrics/database"
 	"strings"
 	"time"
@@ -57,13 +58,14 @@ func NewTask() *task {
 type connectorImpl struct {
 	conn driver.Conn
+	metrics database.Database
 	batches map[string]Bulk //driver.Batch
 	workerTask chan *task
 	done chan struct{}
 	finished chan struct{}
 }

-func NewConnector(cfg common.Clickhouse) Connector {
+func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {
 	conn, err := clickhouse.Open(&clickhouse.Options{
 		Addr: []string{cfg.GetTrimmedURL()},
 		Auth: clickhouse.Auth{
@@ -84,6 +86,7 @@ func NewConnector(cfg common.Clickhouse) Connector {
 	c := &connectorImpl{
 		conn: conn,
+		metrics: metrics,
 		batches: make(map[string]Bulk, 20),
 		workerTask: make(chan *task, 1),
 		done: make(chan struct{}),
@@ -94,7 +97,7 @@ func NewConnector(cfg common.Clickhouse) Connector {
 }

 func (c *connectorImpl) newBatch(name, query string) error {
-	batch, err := NewBulk(c.conn, name, query)
+	batch, err := NewBulk(c.conn, c.metrics, name, query)
 	if err != nil {
 		return fmt.Errorf("can't create new batch: %s", err)
 	}


@@ -52,6 +52,7 @@ func NewBatchesTask(size int) *batchesTask {
 type BatchSet struct {
 	log logger.Logger
 	c pool.Pool
+	metrics database.Database
 	ctx context.Context
 	batches map[uint64]*SessionBatch
 	workerTask chan *batchesTask
@@ -59,10 +60,11 @@ type BatchSet struct {
 	finished chan struct{}
 }

-func NewBatchSet(log logger.Logger, c pool.Pool) *BatchSet {
+func NewBatchSet(log logger.Logger, c pool.Pool, metrics database.Database) *BatchSet {
 	bs := &BatchSet{
 		log: log,
 		c: c,
+		metrics: metrics,
 		ctx: context.Background(),
 		batches: make(map[uint64]*SessionBatch),
 		workerTask: make(chan *batchesTask, 1),
@@ -104,7 +106,7 @@ func (conn *BatchSet) Stop() {
 func (conn *BatchSet) sendBatches(t *batchesTask) {
 	for _, batch := range t.batches {
 		// Record batch size
-		database.RecordBatchElements(float64(batch.Len()))
+		conn.metrics.RecordBatchElements(float64(batch.Len()))
 		start := time.Now()
@@ -120,7 +122,7 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
 			}
 		}
 		br.Close() // returns err
-		database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
+		conn.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
 	}
 }


@@ -24,6 +24,7 @@ type Bulk interface {

 type bulkImpl struct {
 	conn pool.Pool
+	metrics database.Database
 	table string
 	columns string
 	template string
@@ -75,12 +76,12 @@ func (b *bulkImpl) send() error {
 		return fmt.Errorf("send bulk err: %s", err)
 	}
 	// Save bulk metrics
-	database.RecordBulkElements(float64(size), "pg", b.table)
-	database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
+	b.metrics.RecordBulkElements(float64(size), "pg", b.table)
+	b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
 	return nil
 }

-func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
+func NewBulk(conn pool.Pool, metrics database.Database, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
 	switch {
 	case conn == nil:
 		return nil, errors.New("db conn is empty")
@@ -97,6 +98,7 @@ func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit
 	}
 	return &bulkImpl{
 		conn: conn,
+		metrics: metrics,
 		table: table,
 		columns: columns,
 		template: template,


@ -2,6 +2,7 @@ package postgres
import ( import (
"context" "context"
"openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/db/postgres/pool" "openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
@ -21,6 +22,7 @@ type BulkSet struct {
log logger.Logger log logger.Logger
c pool.Pool c pool.Pool
ctx context.Context ctx context.Context
metrics database.Database
autocompletes Bulk autocompletes Bulk
requests Bulk requests Bulk
customEvents Bulk customEvents Bulk
@ -43,10 +45,11 @@ type BulkSet struct {
finished chan struct{} finished chan struct{}
} }
func NewBulkSet(log logger.Logger, c pool.Pool) *BulkSet { func NewBulkSet(log logger.Logger, c pool.Pool, metrics database.Database) *BulkSet {
bs := &BulkSet{ bs := &BulkSet{
log: log, log: log,
c: c, c: c,
metrics: metrics,
ctx: context.Background(), ctx: context.Background(),
workerTask: make(chan *bulksTask, 1), workerTask: make(chan *bulksTask, 1),
done: make(chan struct{}), done: make(chan struct{}),
@ -100,7 +103,7 @@ func (conn *BulkSet) Get(name string) Bulk {
func (conn *BulkSet) initBulks() { func (conn *BulkSet) initBulks() {
var err error var err error
conn.autocompletes, err = NewBulk(conn.c, conn.autocompletes, err = NewBulk(conn.c, conn.metrics,
"autocomplete", "autocomplete",
"(value, type, project_id)", "(value, type, project_id)",
"($%d, $%d, $%d)", "($%d, $%d, $%d)",
@ -108,7 +111,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create autocomplete bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create autocomplete bulk: %s", err)
} }
conn.requests, err = NewBulk(conn.c, conn.requests, err = NewBulk(conn.c, conn.metrics,
"events_common.requests", "events_common.requests",
"(session_id, timestamp, seq_index, url, duration, success)", "(session_id, timestamp, seq_index, url, duration, success)",
"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)", "($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
@ -116,7 +119,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create requests bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create requests bulk: %s", err)
} }
conn.customEvents, err = NewBulk(conn.c, conn.customEvents, err = NewBulk(conn.c, conn.metrics,
"events_common.customs", "events_common.customs",
"(session_id, timestamp, seq_index, name, payload)", "(session_id, timestamp, seq_index, name, payload)",
"($%d, $%d, $%d, LEFT($%d, 2000), $%d)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
@ -124,7 +127,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create customEvents bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create customEvents bulk: %s", err)
} }
conn.webPageEvents, err = NewBulk(conn.c, conn.webPageEvents, err = NewBulk(conn.c, conn.metrics,
"events.pages", "events.pages",
"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+ "(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+ "load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
@ -136,7 +139,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webPageEvents bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webPageEvents bulk: %s", err)
} }
conn.webInputDurations, err = NewBulk(conn.c, conn.webInputDurations, err = NewBulk(conn.c, conn.metrics,
"events.inputs", "events.inputs",
"(session_id, message_id, timestamp, label, hesitation, duration)", "(session_id, message_id, timestamp, label, hesitation, duration)",
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)", "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)",
@ -144,7 +147,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webInputDurations bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webInputDurations bulk: %s", err)
} }
conn.webGraphQL, err = NewBulk(conn.c, conn.webGraphQL, err = NewBulk(conn.c, conn.metrics,
"events.graphql", "events.graphql",
"(session_id, timestamp, message_id, name, request_body, response_body)", "(session_id, timestamp, message_id, name, request_body, response_body)",
"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@ -152,7 +155,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webGraphQL bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webGraphQL bulk: %s", err)
} }
conn.webErrors, err = NewBulk(conn.c, conn.webErrors, err = NewBulk(conn.c, conn.metrics,
"errors", "errors",
"(error_id, project_id, source, name, message, payload)", "(error_id, project_id, source, name, message, payload)",
"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)", "($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
@ -160,7 +163,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webErrors bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webErrors bulk: %s", err)
} }
conn.webErrorEvents, err = NewBulk(conn.c, conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics,
"events.errors", "events.errors",
"(session_id, message_id, timestamp, error_id)", "(session_id, message_id, timestamp, error_id)",
"($%d, $%d, $%d, $%d)", "($%d, $%d, $%d, $%d)",
@ -168,7 +171,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webErrorEvents bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webErrorEvents bulk: %s", err)
} }
conn.webErrorTags, err = NewBulk(conn.c, conn.webErrorTags, err = NewBulk(conn.c, conn.metrics,
"public.errors_tags", "public.errors_tags",
"(session_id, message_id, error_id, key, value)", "(session_id, message_id, error_id, key, value)",
"($%d, $%d, $%d, $%d, $%d)", "($%d, $%d, $%d, $%d, $%d)",
@ -176,7 +179,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webErrorTags bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webErrorTags bulk: %s", err)
} }
conn.webIssues, err = NewBulk(conn.c, conn.webIssues, err = NewBulk(conn.c, conn.metrics,
"issues", "issues",
"(project_id, issue_id, type, context_string)", "(project_id, issue_id, type, context_string)",
"($%d, $%d, $%d, $%d)", "($%d, $%d, $%d, $%d)",
@ -184,7 +187,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webIssues bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webIssues bulk: %s", err)
} }
conn.webIssueEvents, err = NewBulk(conn.c, conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics,
"events_common.issues", "events_common.issues",
"(session_id, issue_id, timestamp, seq_index, payload)", "(session_id, issue_id, timestamp, seq_index, payload)",
"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))", "($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
@ -192,7 +195,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webIssueEvents bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webIssueEvents bulk: %s", err)
} }
conn.webCustomEvents, err = NewBulk(conn.c, conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics,
"events_common.customs", "events_common.customs",
"(session_id, seq_index, timestamp, name, payload, level)", "(session_id, seq_index, timestamp, name, payload, level)",
"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)", "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@ -200,7 +203,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webCustomEvents bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webCustomEvents bulk: %s", err)
} }
conn.webClickEvents, err = NewBulk(conn.c, conn.webClickEvents, err = NewBulk(conn.c, conn.metrics,
"events.clicks", "events.clicks",
"(session_id, message_id, timestamp, label, selector, url, path, hesitation)", "(session_id, message_id, timestamp, label, selector, url, path, hesitation)",
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)", "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
@ -208,7 +211,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
} }
conn.webClickXYEvents, err = NewBulk(conn.c, conn.webClickXYEvents, err = NewBulk(conn.c, conn.metrics,
"events.clicks", "events.clicks",
"(session_id, message_id, timestamp, label, selector, url, path, hesitation, normalized_x, normalized_y)", "(session_id, message_id, timestamp, label, selector, url, path, hesitation, normalized_x, normalized_y)",
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d, $%d, $%d)", "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d, $%d, $%d)",
@ -216,7 +219,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
} }
conn.webNetworkRequest, err = NewBulk(conn.c, conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics,
"events_common.requests", "events_common.requests",
"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success, transfer_size)", "(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success, transfer_size)",
"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)", "($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)",
@ -224,7 +227,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err)
} }
conn.webCanvasNodes, err = NewBulk(conn.c, conn.webCanvasNodes, err = NewBulk(conn.c, conn.metrics,
"events.canvas_recordings", "events.canvas_recordings",
"(session_id, recording_id, timestamp)", "(session_id, recording_id, timestamp)",
"($%d, $%d, $%d)", "($%d, $%d, $%d)",
@ -232,7 +235,7 @@ func (conn *BulkSet) initBulks() {
if err != nil { if err != nil {
conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err) conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err)
} }
conn.webTagTriggers, err = NewBulk(conn.c, conn.webTagTriggers, err = NewBulk(conn.c, conn.metrics,
"events.tags", "events.tags",
"(session_id, timestamp, seq_index, tag_id)", "(session_id, timestamp, seq_index, tag_id)",
"($%d, $%d, $%d, $%d)", "($%d, $%d, $%d, $%d)",

View file

@@ -2,6 +2,7 @@ package postgres
 import (
     "context"
+    "openreplay/backend/pkg/metrics/database"
 
     "openreplay/backend/pkg/db/postgres/batch"
     "openreplay/backend/pkg/db/postgres/pool"
@@ -22,7 +23,7 @@ type Conn struct {
     chConn CH
 }
 
-func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
+func NewConn(log logger.Logger, pool pool.Pool, ch CH, metrics database.Database) *Conn {
     if pool == nil {
         log.Fatal(context.Background(), "pg pool is empty")
     }
@@ -30,8 +31,8 @@ func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
         log:     log,
         Pool:    pool,
         chConn:  ch,
-        bulks:   NewBulkSet(log, pool),
-        batches: batch.NewBatchSet(log, pool),
+        bulks:   NewBulkSet(log, pool, metrics),
+        batches: batch.NewBatchSet(log, pool, metrics),
     }
 }
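
The constructor change pushes ownership of the metrics object up to the caller, which can now share one `database.Database` across the pool, the bulk set, and the batch set. A hedged wiring sketch; the service name, `cfg`, and `chConn` are placeholders:

    dbMetrics := database.New("db") // service name assumed
    pgPool, err := pool.New(dbMetrics, cfg.Postgres.String())
    if err != nil {
        log.Fatal(ctx, "can't init postgres connection: %s", err)
    }
    conn := postgres.NewConn(log, pgPool, chConn, dbMetrics)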

View file

@@ -23,58 +23,12 @@ type Pool interface {
 }
 
 type poolImpl struct {
     url     string
     conn    *pgxpool.Pool
+    metrics database.Database
 }
 
-func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
-    start := time.Now()
-    res, err := p.conn.Query(getTimeoutContext(), sql, args...)
-    method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
-    return res, err
-}
-
-func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
-    start := time.Now()
-    res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
-    method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
-    return res
-}
-
-func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
-    start := time.Now()
-    _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
-    method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
-    return err
-}
-
-func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
-    start := time.Now()
-    res := p.conn.SendBatch(getTimeoutContext(), b)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
-    database.IncreaseTotalRequests("sendBatch", "")
-    return res
-}
-
-func (p *poolImpl) Begin() (*Tx, error) {
-    start := time.Now()
-    tx, err := p.conn.Begin(context.Background())
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
-    database.IncreaseTotalRequests("begin", "")
-    return &Tx{tx}, err
-}
-
-func (p *poolImpl) Close() {
-    p.conn.Close()
-}
-
-func New(url string) (Pool, error) {
+func New(metrics database.Database, url string) (Pool, error) {
     if url == "" {
         return nil, errors.New("pg connection url is empty")
     }
@@ -83,24 +37,73 @@ func New(url string) (Pool, error) {
         return nil, fmt.Errorf("pgxpool.Connect error: %v", err)
     }
     res := &poolImpl{
         url:     url,
         conn:    conn,
+        metrics: metrics,
     }
     return res, nil
 }
 
+func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
+    start := time.Now()
+    res, err := p.conn.Query(getTimeoutContext(), sql, args...)
+    method, table := methodName(sql)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    p.metrics.IncreaseTotalRequests(method, table)
+    return res, err
+}
+
+func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
+    start := time.Now()
+    res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
+    method, table := methodName(sql)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    p.metrics.IncreaseTotalRequests(method, table)
+    return res
+}
+
+func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
+    start := time.Now()
+    _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
+    method, table := methodName(sql)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    p.metrics.IncreaseTotalRequests(method, table)
+    return err
+}
+
+func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
+    start := time.Now()
+    res := p.conn.SendBatch(getTimeoutContext(), b)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
+    p.metrics.IncreaseTotalRequests("sendBatch", "")
+    return res
+}
+
+func (p *poolImpl) Begin() (*Tx, error) {
+    start := time.Now()
+    tx, err := p.conn.Begin(context.Background())
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
+    p.metrics.IncreaseTotalRequests("begin", "")
+    return &Tx{tx, p.metrics}, err
+}
+
+func (p *poolImpl) Close() {
+    p.conn.Close()
+}
+
 // TX - start
 type Tx struct {
     pgx.Tx
+    metrics database.Database
 }
 
 func (tx *Tx) TxExec(sql string, args ...interface{}) error {
     start := time.Now()
     _, err := tx.Exec(context.Background(), sql, args...)
     method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    tx.metrics.IncreaseTotalRequests(method, table)
     return err
 }
@@ -108,24 +111,24 @@ func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
     start := time.Now()
     res := tx.QueryRow(context.Background(), sql, args...)
     method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    tx.metrics.IncreaseTotalRequests(method, table)
     return res
 }
 
 func (tx *Tx) TxRollback() error {
     start := time.Now()
     err := tx.Rollback(context.Background())
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
-    database.IncreaseTotalRequests("rollback", "")
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
+    tx.metrics.IncreaseTotalRequests("rollback", "")
     return err
 }
 
 func (tx *Tx) TxCommit() error {
     start := time.Now()
     err := tx.Commit(context.Background())
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
-    database.IncreaseTotalRequests("commit", "")
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
+    tx.metrics.IncreaseTotalRequests("commit", "")
     return err
 }
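
Because `Begin` now seeds the `Tx` with the pool's metrics instance, transaction statements keep reporting through the same interface. A small usage sketch against the API above; the table and values are made up, and the exact labels depend on `methodName`, which is not shown in this diff:

    func demoTx(pg pool.Pool) error {
        tx, err := pg.Begin() // recorded under method "begin"
        if err != nil {
            return err
        }
        sql := "UPDATE sessions SET duration = $1 WHERE session_id = $2"
        if err := tx.TxExec(sql, 42, 1); err != nil {
            _ = tx.TxRollback() // recorded under "rollback"
            return err
        }
        return tx.TxCommit() // recorded under "commit"
    }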

View file

@@ -2,6 +2,7 @@ package integrations
 import (
     "openreplay/backend/pkg/integrations/service"
+    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/server/tracer"
     "time"
@@ -23,7 +24,7 @@ type ServiceBuilder struct {
     IntegrationsAPI api.Handlers
 }
 
-func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, pgconn pool.Pool) (*ServiceBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServiceBuilder, error) {
     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         return nil, err
@@ -37,7 +38,7 @@ func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics w
     if err != nil {
         return nil, err
     }
-    auditrail, err := tracer.NewTracer(log, pgconn)
+    auditrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
     if err != nil {
         return nil, err
     }
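
Callers of the integrations builder must now construct and pass the database metrics too, since the audit tracer records its queries through them. A hypothetical caller-side wiring, following the same pattern as the other services in this commit (names assumed):

    webMetrics := web.New("integrations")
    dbMetrics := database.New("integrations")
    pgconn, err := pool.New(dbMetrics, cfg.Postgres.String())
    if err != nil {
        log.Fatal(ctx, "can't init postgres connection: %s", err)
    }
    builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgconn)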

View file

@@ -8,11 +8,13 @@ import (
 type sinkIteratorImpl struct {
     coreIterator MessageIterator
     handler      MessageHandler
+    metrics      sink.Sink
 }
 
-func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
+func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool, metrics sink.Sink) MessageIterator {
     iter := &sinkIteratorImpl{
         handler: messageHandler,
+        metrics: metrics,
     }
     iter.coreIterator = NewMessageIterator(log, iter.handle, messageFilter, autoDecode)
     return iter
@@ -23,8 +25,8 @@ func (i *sinkIteratorImpl) handle(message Message) {
 }
 
 func (i *sinkIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
-    sink.RecordBatchSize(float64(len(batchData)))
-    sink.IncreaseTotalBatches()
+    i.metrics.RecordBatchSize(float64(len(batchData)))
+    i.metrics.IncreaseTotalBatches()
     // Call core iterator
     i.coreIterator.Iterate(batchData, batchInfo)
     // Send batch end signal
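
The iterator receives its `sink.Sink` at construction time instead of reaching for package-level counters, which also makes it easy to pass the no-op implementation in tests. A hedged wiring sketch; the handler variable and the constructor's package qualifier are assumptions:

    sinkMetrics := sink.New("sink")
    iter := NewSinkMessageIterator(log, messageHandler, nil, true, sinkMetrics)
    // Each call records the batch size and bumps the batch counter
    // before delegating to the core iterator.
    iter.Iterate(batchData, batchInfo)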

View file

@@ -1,22 +0,0 @@
-package analytics
-
-import (
-    "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
-)
-
-var cardCreated = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "card",
-        Name:      "created",
-        Help:      "Histogram for tracking card creation",
-        Buckets:   common.DefaultBuckets,
-    },
-)
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        cardCreated,
-    }
-}

View file

@@ -2,71 +2,22 @@ package assets
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
-    "strconv"
 )
 
-var assetsProcessedSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "assets",
-        Name:      "processed_total",
-        Help:      "A counter displaying the total count of processed assets.",
-    },
-)
-
-func IncreaseProcessesSessions() {
-    assetsProcessedSessions.Inc()
-}
-
-var assetsSavedSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "assets",
-        Name:      "saved_total",
-        Help:      "A counter displaying the total number of cached assets.",
-    },
-)
-
-func IncreaseSavedSessions() {
-    assetsSavedSessions.Inc()
-}
-
-var assetsDownloadDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "assets",
-        Name:      "download_duration_seconds",
-        Help:      "A histogram displaying the duration of downloading for each asset in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"response_code"},
-)
-
-func RecordDownloadDuration(durMillis float64, code int) {
-    assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
-}
-
-var assetsUploadDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "assets",
-        Name:      "upload_s3_duration_seconds",
-        Help:      "A histogram displaying the duration of uploading to s3 for each asset in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"failed"},
-)
-
-func RecordUploadDuration(durMillis float64, isFailed bool) {
-    failed := "false"
-    if isFailed {
-        failed = "true"
-    }
-    assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        assetsProcessedSessions,
-        assetsSavedSessions,
-        assetsDownloadDuration,
-        assetsUploadDuration,
-    }
-}
+type Assets interface {
+    IncreaseProcessesSessions()
+    IncreaseSavedSessions()
+    RecordDownloadDuration(durMillis float64, code int)
+    RecordUploadDuration(durMillis float64, isFailed bool)
+    List() []prometheus.Collector
+}
+
+type assetsImpl struct{}
+
+func New(serviceName string) Assets { return &assetsImpl{} }
+
+func (a *assetsImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (a *assetsImpl) IncreaseProcessesSessions() {}
+func (a *assetsImpl) IncreaseSavedSessions() {}
+func (a *assetsImpl) RecordDownloadDuration(durMillis float64, code int) {}
+func (a *assetsImpl) RecordUploadDuration(durMillis float64, isFailed bool) {}
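
Consumers now hold an `Assets` value and record through it. A minimal sketch of timing a download against this interface; the `downloader` struct and its field are invented for illustration:

    // Assumes imports: "net/http", "time",
    // "openreplay/backend/pkg/metrics/assets".
    type downloader struct {
        metrics assets.Assets
    }

    func (d *downloader) fetch(url string) error {
        start := time.Now()
        resp, err := http.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        d.metrics.RecordDownloadDuration(float64(time.Since(start).Milliseconds()), resp.StatusCode)
        return nil
    }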

View file

@@ -2,7 +2,6 @@ package canvas
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
 )
 
 type Canvas interface {
@@ -18,175 +17,17 @@ type Canvas interface {
     List() []prometheus.Collector
 }
 
-type canvasImpl struct {
-    canvasesImageSize            prometheus.Histogram
-    canvasesTotalSavedImages     prometheus.Counter
-    canvasesImagesPerCanvas      prometheus.Histogram
-    canvasesCanvasesPerSession   prometheus.Histogram
-    canvasesPreparingDuration    prometheus.Histogram
-    canvasesTotalCreatedArchives prometheus.Counter
-    canvasesArchivingDuration    prometheus.Histogram
-    canvasesArchiveSize          prometheus.Histogram
-    canvasesUploadingDuration    prometheus.Histogram
-}
-
-func New(serviceName string) Canvas {
-    return &canvasImpl{
-        canvasesImageSize:            newImageSizeMetric(serviceName),
-        canvasesTotalSavedImages:     newTotalSavedImages(serviceName),
-        canvasesImagesPerCanvas:      newImagesPerCanvas(serviceName),
-        canvasesCanvasesPerSession:   newCanvasesPerSession(serviceName),
-        canvasesPreparingDuration:    newPreparingDuration(serviceName),
-        canvasesTotalCreatedArchives: newTotalCreatedArchives(serviceName),
-        canvasesArchivingDuration:    newArchivingDuration(serviceName),
-        canvasesArchiveSize:          newArchiveSize(serviceName),
-        canvasesUploadingDuration:    newUploadingDuration(serviceName),
-    }
-}
-
-func (c *canvasImpl) List() []prometheus.Collector {
-    return []prometheus.Collector{
-        c.canvasesImageSize,
-        c.canvasesTotalSavedImages,
-        c.canvasesImagesPerCanvas,
-        c.canvasesCanvasesPerSession,
-        c.canvasesPreparingDuration,
-        c.canvasesTotalCreatedArchives,
-        c.canvasesArchivingDuration,
-        c.canvasesArchiveSize,
-        c.canvasesUploadingDuration,
-    }
-}
-
-func newImageSizeMetric(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "image_size_bytes",
-            Help:      "A histogram displaying the size of each canvas image in bytes.",
-            Buckets:   common.DefaultSizeBuckets,
-        },
-    )
-}
-
-func (c *canvasImpl) RecordCanvasImageSize(size float64) {
-    c.canvasesImageSize.Observe(size)
-}
-
-func newTotalSavedImages(serviceName string) prometheus.Counter {
-    return prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Namespace: serviceName,
-            Name:      "total_saved_images",
-            Help:      "A counter displaying the total number of saved images.",
-        },
-    )
-}
-
-func (c *canvasImpl) IncreaseTotalSavedImages() {
-    c.canvasesTotalSavedImages.Inc()
-}
-
-func newImagesPerCanvas(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "images_per_canvas",
-            Help:      "A histogram displaying the number of images per canvas.",
-            Buckets:   common.DefaultBuckets,
-        },
-    )
-}
-
-func (c *canvasImpl) RecordImagesPerCanvas(number float64) {
-    c.canvasesImagesPerCanvas.Observe(number)
-}
-
-func newCanvasesPerSession(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "canvases_per_session",
-            Help:      "A histogram displaying the number of canvases per session.",
-            Buckets:   common.DefaultBuckets,
-        },
-    )
-}
-
-func (c *canvasImpl) RecordCanvasesPerSession(number float64) {
-    c.canvasesCanvasesPerSession.Observe(number)
-}
-
-func newPreparingDuration(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "preparing_duration_seconds",
-            Help:      "A histogram displaying the duration of preparing the list of canvases for each session in seconds.",
-            Buckets:   common.DefaultDurationBuckets,
-        },
-    )
-}
-
-func (c *canvasImpl) RecordPreparingDuration(duration float64) {
-    c.canvasesPreparingDuration.Observe(duration)
-}
-
-func newTotalCreatedArchives(serviceName string) prometheus.Counter {
-    return prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Namespace: serviceName,
-            Name:      "total_created_archives",
-            Help:      "A counter displaying the total number of created canvas archives.",
-        },
-    )
-}
-
-func (c *canvasImpl) IncreaseTotalCreatedArchives() {
-    c.canvasesTotalCreatedArchives.Inc()
-}
-
-func newArchivingDuration(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "archiving_duration_seconds",
-            Help:      "A histogram displaying the duration of archiving for each canvas in seconds.",
-            Buckets:   common.DefaultDurationBuckets,
-        },
-    )
-}
-
-func (c *canvasImpl) RecordArchivingDuration(duration float64) {
-    c.canvasesArchivingDuration.Observe(duration)
-}
-
-func newArchiveSize(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "archive_size_bytes",
-            Help:      "A histogram displaying the size of each canvas archive in bytes.",
-            Buckets:   common.DefaultSizeBuckets,
-        },
-    )
-}
-
-func (c *canvasImpl) RecordArchiveSize(size float64) {
-    c.canvasesArchiveSize.Observe(size)
-}
-
-func newUploadingDuration(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "uploading_duration_seconds",
-            Help:      "A histogram displaying the duration of uploading for each canvas in seconds.",
-            Buckets:   common.DefaultDurationBuckets,
-        },
-    )
-}
-
-func (c *canvasImpl) RecordUploadingDuration(duration float64) {
-    c.canvasesUploadingDuration.Observe(duration)
-}
+type canvasImpl struct{}
+
+func New(serviceName string) Canvas { return &canvasImpl{} }
+
+func (c *canvasImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (c *canvasImpl) RecordCanvasImageSize(size float64) {}
+func (c *canvasImpl) IncreaseTotalSavedImages() {}
+func (c *canvasImpl) RecordImagesPerCanvas(number float64) {}
+func (c *canvasImpl) RecordCanvasesPerSession(number float64) {}
+func (c *canvasImpl) RecordPreparingDuration(duration float64) {}
+func (c *canvasImpl) IncreaseTotalCreatedArchives() {}
+func (c *canvasImpl) RecordArchivingDuration(duration float64) {}
+func (c *canvasImpl) RecordArchiveSize(size float64) {}
+func (c *canvasImpl) RecordUploadingDuration(duration float64) {}

View file

@@ -2,141 +2,32 @@ package database
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
 )
 
-var dbBatchElements = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "db",
-        Name:      "batch_size_elements",
-        Help:      "A histogram displaying the number of SQL commands in each batch.",
-        Buckets:   common.DefaultBuckets,
-    },
-)
-
-func RecordBatchElements(number float64) {
-    dbBatchElements.Observe(number)
-}
-
-var dbBatchInsertDuration = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "db",
-        Name:      "batch_insert_duration_seconds",
-        Help:      "A histogram displaying the duration of batch inserts in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-)
-
-func RecordBatchInsertDuration(durMillis float64) {
-    dbBatchInsertDuration.Observe(durMillis / 1000.0)
-}
-
-var dbBulkSize = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "db",
-        Name:      "bulk_size_bytes",
-        Help:      "A histogram displaying the bulk size in bytes.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-    []string{"db", "table"},
-)
-
-func RecordBulkSize(size float64, db, table string) {
-    dbBulkSize.WithLabelValues(db, table).Observe(size)
-}
-
-var dbBulkElements = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "db",
-        Name:      "bulk_size_elements",
-        Help:      "A histogram displaying the size of data set in each bulk.",
-        Buckets:   common.DefaultBuckets,
-    },
-    []string{"db", "table"},
-)
-
-func RecordBulkElements(size float64, db, table string) {
-    dbBulkElements.WithLabelValues(db, table).Observe(size)
-}
-
-var dbBulkInsertDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "db",
-        Name:      "bulk_insert_duration_seconds",
-        Help:      "A histogram displaying the duration of bulk inserts in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"db", "table"},
-)
-
-func RecordBulkInsertDuration(durMillis float64, db, table string) {
-    dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0)
-}
-
-var dbRequestDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "db",
-        Name:      "request_duration_seconds",
-        Help:      "A histogram displaying the duration of each sql request in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"method", "table"},
-)
-
-func RecordRequestDuration(durMillis float64, method, table string) {
-    dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
-}
-
-var dbTotalRequests = prometheus.NewCounterVec(
-    prometheus.CounterOpts{
-        Namespace: "db",
-        Name:      "requests_total",
-        Help:      "A counter showing the total number of all SQL requests.",
-    },
-    []string{"method", "table"},
-)
-
-func IncreaseTotalRequests(method, table string) {
-    dbTotalRequests.WithLabelValues(method, table).Inc()
-}
-
-var cacheRedisRequests = prometheus.NewCounterVec(
-    prometheus.CounterOpts{
-        Namespace: "cache",
-        Name:      "redis_requests_total",
-        Help:      "A counter showing the total number of all Redis requests.",
-    },
-    []string{"method", "table"},
-)
-
-func IncreaseRedisRequests(method, table string) {
-    cacheRedisRequests.WithLabelValues(method, table).Inc()
-}
-
-var cacheRedisRequestDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "cache",
-        Name:      "redis_request_duration_seconds",
-        Help:      "A histogram displaying the duration of each Redis request in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"method", "table"},
-)
-
-func RecordRedisRequestDuration(durMillis float64, method, table string) {
-    cacheRedisRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        dbBatchElements,
-        dbBatchInsertDuration,
-        dbBulkSize,
-        dbBulkElements,
-        dbBulkInsertDuration,
-        dbRequestDuration,
-        dbTotalRequests,
-        cacheRedisRequests,
-        cacheRedisRequestDuration,
-    }
-}
+type Database interface {
+    RecordBatchElements(number float64)
+    RecordBatchInsertDuration(durMillis float64)
+    RecordBulkSize(size float64, db, table string)
+    RecordBulkElements(size float64, db, table string)
+    RecordBulkInsertDuration(durMillis float64, db, table string)
+    RecordRequestDuration(durMillis float64, method, table string)
+    IncreaseTotalRequests(method, table string)
+    IncreaseRedisRequests(method, table string)
+    RecordRedisRequestDuration(durMillis float64, method, table string)
+    List() []prometheus.Collector
+}
+
+type databaseImpl struct{}
+
+func New(serviceName string) Database { return &databaseImpl{} }
+
+func (d *databaseImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (d *databaseImpl) RecordBatchElements(number float64) {}
+func (d *databaseImpl) RecordBatchInsertDuration(durMillis float64) {}
+func (d *databaseImpl) RecordBulkSize(size float64, db, table string) {}
+func (d *databaseImpl) RecordBulkElements(size float64, db, table string) {}
+func (d *databaseImpl) RecordBulkInsertDuration(durMillis float64, db, table string) {}
+func (d *databaseImpl) RecordRequestDuration(durMillis float64, method, table string) {}
+func (d *databaseImpl) IncreaseTotalRequests(method, table string) {}
+func (d *databaseImpl) IncreaseRedisRequests(method, table string) {}
+func (d *databaseImpl) RecordRedisRequestDuration(durMillis float64, method, table string) {}
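
This build ships the no-op implementation; the interface leaves room for a collector-backed one (the commit message mentions a fix in the ee tracer). As a sketch only, a concrete version of two of the methods could reuse the metric shapes from the deleted package-level code, swapping the fixed "db" namespace for the constructor's service name:

    type promDatabase struct {
        requestDuration *prometheus.HistogramVec
        totalRequests   *prometheus.CounterVec
    }

    func newPromDatabase(serviceName string) *promDatabase {
        return &promDatabase{
            requestDuration: prometheus.NewHistogramVec(prometheus.HistogramOpts{
                Namespace: serviceName,
                Name:      "request_duration_seconds",
                Help:      "A histogram displaying the duration of each sql request in seconds.",
                Buckets:   common.DefaultDurationBuckets,
            }, []string{"method", "table"}),
            totalRequests: prometheus.NewCounterVec(prometheus.CounterOpts{
                Namespace: serviceName,
                Name:      "requests_total",
                Help:      "A counter showing the total number of all SQL requests.",
            }, []string{"method", "table"}),
        }
    }

    func (p *promDatabase) RecordRequestDuration(durMillis float64, method, table string) {
        p.requestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
    }

    func (p *promDatabase) IncreaseTotalRequests(method, table string) {
        p.totalRequests.WithLabelValues(method, table).Inc()
    }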

View file

@@ -2,50 +2,20 @@ package ender
 import "github.com/prometheus/client_golang/prometheus"
 
-var enderActiveSessions = prometheus.NewGauge(
-    prometheus.GaugeOpts{
-        Namespace: "ender",
-        Name:      "sessions_active",
-        Help:      "A gauge displaying the number of active (live) sessions.",
-    },
-)
-
-func IncreaseActiveSessions() {
-    enderActiveSessions.Inc()
-}
-
-func DecreaseActiveSessions() {
-    enderActiveSessions.Dec()
-}
-
-var enderClosedSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "ender",
-        Name:      "sessions_closed",
-        Help:      "A counter displaying the number of closed sessions (sent SessionEnd).",
-    },
-)
-
-func IncreaseClosedSessions() {
-    enderClosedSessions.Inc()
-}
-
-var enderTotalSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "ender",
-        Name:      "sessions_total",
-        Help:      "A counter displaying the number of all processed sessions.",
-    },
-)
-
-func IncreaseTotalSessions() {
-    enderTotalSessions.Inc()
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        enderActiveSessions,
-        enderClosedSessions,
-        enderTotalSessions,
-    }
-}
+type Ender interface {
+    IncreaseActiveSessions()
+    DecreaseActiveSessions()
+    IncreaseClosedSessions()
+    IncreaseTotalSessions()
+    List() []prometheus.Collector
+}
+
+type enderImpl struct{}
+
+func New(serviceName string) Ender { return &enderImpl{} }
+
+func (e *enderImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (e *enderImpl) IncreaseActiveSessions() {}
+func (e *enderImpl) DecreaseActiveSessions() {}
+func (e *enderImpl) IncreaseClosedSessions() {}
+func (e *enderImpl) IncreaseTotalSessions() {}
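
One practical payoff of the interface is testability: session-lifecycle code can take an `Ender` dependency, and a test can count calls with a fake. A hypothetical test double:

    type fakeEnder struct{ active, closed, total int }

    func (f *fakeEnder) IncreaseActiveSessions()      { f.active++ }
    func (f *fakeEnder) DecreaseActiveSessions()      { f.active-- }
    func (f *fakeEnder) IncreaseClosedSessions()      { f.closed++ }
    func (f *fakeEnder) IncreaseTotalSessions()       { f.total++ }
    func (f *fakeEnder) List() []prometheus.Collector { return nil }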

View file

@@ -2,65 +2,16 @@ package heuristics
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
-    "strconv"
 )
 
-var heuristicsTotalEvents = prometheus.NewCounterVec(
-    prometheus.CounterOpts{
-        Namespace: "heuristics",
-        Name:      "events_total",
-        Help:      "A counter displaying the number of all processed events",
-    },
-    []string{"type"},
-)
-
-func IncreaseTotalEvents(eventType string) {
-    heuristicsTotalEvents.WithLabelValues(eventType).Inc()
-}
-
-var heuristicsRequestSize = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "heuristics",
-        Name:      "request_size_bytes",
-        Help:      "A histogram displaying the size of each HTTP request in bytes.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-    []string{"url", "response_code"},
-)
-
-func RecordRequestSize(size float64, url string, code int) {
-    heuristicsRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
-}
-
-var heuristicsRequestDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "heuristics",
-        Name:      "request_duration_seconds",
-        Help:      "A histogram displaying the duration of each HTTP request in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"url", "response_code"},
-)
-
-func RecordRequestDuration(durMillis float64, url string, code int) {
-    heuristicsRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
-}
-
-var heuristicsTotalRequests = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "heuristics",
-        Name:      "requests_total",
-        Help:      "A counter displaying the number all HTTP requests.",
-    },
-)
-
-func IncreaseTotalRequests() {
-    heuristicsTotalRequests.Inc()
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        heuristicsTotalEvents,
-    }
-}
+type Heuristics interface {
+    IncreaseTotalEvents(eventType string)
+    List() []prometheus.Collector
+}
+
+type heuristicsImpl struct{}
+
+func New(serviceName string) Heuristics { return &heuristicsImpl{} }
+
+func (h *heuristicsImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (h *heuristicsImpl) IncreaseTotalEvents(eventType string) {}
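
Only the per-type event counter survives into the interface. A tiny illustrative helper for deriving the label; the %T-based naming is an assumption, not the project's convention:

    // Assumes imports: "fmt", "openreplay/backend/pkg/metrics/heuristics".
    func countEvent(h heuristics.Heuristics, evt interface{}) {
        h.IncreaseTotalEvents(fmt.Sprintf("%T", evt))
    }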

View file

@@ -2,7 +2,6 @@ package images
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
 )
 
 type Images interface {
@@ -18,174 +17,17 @@ type Images interface {
     List() []prometheus.Collector
 }
 
-type imagesImpl struct {
-    originalArchiveSize               prometheus.Histogram
-    originalArchiveExtractionDuration prometheus.Histogram
-    totalSavedArchives                prometheus.Counter
-    savingImageDuration               prometheus.Histogram
-    totalSavedImages                  prometheus.Counter
-    totalCreatedArchives              prometheus.Counter
-    archivingDuration                 prometheus.Histogram
-    archiveSize                       prometheus.Histogram
-    uploadingDuration                 prometheus.Histogram
-}
-
-func New(serviceName string) Images {
-    return &imagesImpl{
-        originalArchiveSize:               newOriginalArchiveSize(serviceName),
-        originalArchiveExtractionDuration: newOriginalArchiveExtractionDuration(serviceName),
-        totalSavedArchives:                newTotalSavedArchives(serviceName),
-        savingImageDuration:               newSavingImageDuration(serviceName),
-        totalSavedImages:                  newTotalSavedImages(serviceName),
-        totalCreatedArchives:              newTotalCreatedArchives(serviceName),
-        archivingDuration:                 newArchivingDuration(serviceName),
-        archiveSize:                       newArchiveSize(serviceName),
-        uploadingDuration:                 newUploadingDuration(serviceName),
-    }
-}
-
-func (i *imagesImpl) List() []prometheus.Collector {
-    return []prometheus.Collector{
-        i.originalArchiveSize,
-        i.originalArchiveExtractionDuration,
-        i.totalSavedArchives,
-        i.savingImageDuration,
-        i.totalSavedImages,
-        i.totalCreatedArchives,
-        i.archivingDuration,
-        i.archiveSize,
-        i.uploadingDuration,
-    }
-}
-
-func newOriginalArchiveSize(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "original_archive_size_bytes",
-            Help:      "A histogram displaying the original archive size in bytes.",
-            Buckets:   common.DefaultSizeBuckets,
-        },
-    )
-}
-
-func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {
-    i.archiveSize.Observe(size)
-}
-
-func newOriginalArchiveExtractionDuration(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "original_archive_extraction_duration_seconds",
-            Help:      "A histogram displaying the duration of extracting the original archive.",
-            Buckets:   common.DefaultDurationBuckets,
-        },
-    )
-}
-
-func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {
-    i.originalArchiveExtractionDuration.Observe(duration)
-}
-
-func newTotalSavedArchives(serviceName string) prometheus.Counter {
-    return prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Namespace: serviceName,
-            Name:      "total_saved_archives",
-            Help:      "A counter displaying the total number of saved original archives.",
-        },
-    )
-}
-
-func (i *imagesImpl) IncreaseTotalSavedArchives() {
-    i.totalSavedArchives.Inc()
-}
-
-func newSavingImageDuration(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "saving_image_duration_seconds",
-            Help:      "A histogram displaying the duration of saving each image in seconds.",
-            Buckets:   common.DefaultDurationBuckets,
-        },
-    )
-}
-
-func (i *imagesImpl) RecordSavingImageDuration(duration float64) {
-    i.savingImageDuration.Observe(duration)
-}
-
-func newTotalSavedImages(serviceName string) prometheus.Counter {
-    return prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Namespace: serviceName,
-            Name:      "total_saved_images",
-            Help:      "A counter displaying the total number of saved images.",
-        },
-    )
-}
-
-func (i *imagesImpl) IncreaseTotalSavedImages() {
-    i.totalSavedImages.Inc()
-}
-
-func newTotalCreatedArchives(serviceName string) prometheus.Counter {
-    return prometheus.NewCounter(
-        prometheus.CounterOpts{
-            Namespace: serviceName,
-            Name:      "total_created_archives",
-            Help:      "A counter displaying the total number of created archives.",
-        },
-    )
-}
-
-func (i *imagesImpl) IncreaseTotalCreatedArchives() {
-    i.totalCreatedArchives.Inc()
-}
-
-func newArchivingDuration(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "archiving_duration_seconds",
-            Help:      "A histogram displaying the duration of archiving each session in seconds.",
-            Buckets:   common.DefaultDurationBuckets,
-        },
-    )
-}
-
-func (i *imagesImpl) RecordArchivingDuration(duration float64) {
-    i.archivingDuration.Observe(duration)
-}
-
-func newArchiveSize(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "archive_size_bytes",
-            Help:      "A histogram displaying the session's archive size in bytes.",
-            Buckets:   common.DefaultSizeBuckets,
-        },
-    )
-}
-
-func (i *imagesImpl) RecordArchiveSize(size float64) {
-    i.archiveSize.Observe(size)
-}
-
-func newUploadingDuration(serviceName string) prometheus.Histogram {
-    return prometheus.NewHistogram(
-        prometheus.HistogramOpts{
-            Namespace: serviceName,
-            Name:      "uploading_duration_seconds",
-            Help:      "A histogram displaying the duration of uploading each session's archive to S3 in seconds.",
-            Buckets:   common.DefaultDurationBuckets,
-        },
-    )
-}
-
-func (i *imagesImpl) RecordUploadingDuration(duration float64) {
-    i.uploadingDuration.Observe(duration)
-}
+type imagesImpl struct{}
+
+func New(serviceName string) Images { return &imagesImpl{} }
+
+func (i *imagesImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {}
+func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {}
+func (i *imagesImpl) IncreaseTotalSavedArchives() {}
+func (i *imagesImpl) RecordSavingImageDuration(duration float64) {}
+func (i *imagesImpl) IncreaseTotalSavedImages() {}
+func (i *imagesImpl) IncreaseTotalCreatedArchives() {}
+func (i *imagesImpl) RecordArchivingDuration(duration float64) {}
+func (i *imagesImpl) RecordArchiveSize(size float64) {}
+func (i *imagesImpl) RecordUploadingDuration(duration float64) {}

View file

@@ -1,38 +1,10 @@
 package metrics
 
 import (
-    "context"
-    "net/http"
-
     "github.com/prometheus/client_golang/prometheus"
-    "github.com/prometheus/client_golang/prometheus/collectors"
-    "github.com/prometheus/client_golang/prometheus/promhttp"
-
     "openreplay/backend/pkg/logger"
 )
 
-type MetricServer struct {
-    registry *prometheus.Registry
-}
-
-func New(log logger.Logger, cs []prometheus.Collector) {
-    registry := prometheus.NewRegistry()
-    // Add go runtime metrics and process collectors.
-    registry.MustRegister(
-        collectors.NewGoCollector(),
-        collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
-    )
-    // Add extra metrics
-    registry.MustRegister(cs...)
-    // Expose /metrics HTTP endpoint using the created custom registry.
-    http.Handle(
-        "/metrics", promhttp.HandlerFor(
-            registry,
-            promhttp.HandlerOpts{
-                EnableOpenMetrics: true,
-            }),
-    )
-    go func() {
-        log.Error(context.Background(), "%v", http.ListenAndServe(":8888", nil))
-    }()
-}
+type MetricServer struct{}
+
+func New(log logger.Logger, cs []prometheus.Collector) {}
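
With `New` reduced to a no-op here, nothing serves /metrics on :8888 in this build unless a real server is wired in elsewhere. For reference, a registry-plus-endpoint setup along the lines of the removed implementation would look roughly like this (a sketch, not the shipped code):

    registry := prometheus.NewRegistry()
    registry.MustRegister(
        collectors.NewGoCollector(),
        collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
    )
    registry.MustRegister(cs...)
    http.Handle("/metrics", promhttp.HandlerFor(registry,
        promhttp.HandlerOpts{EnableOpenMetrics: true}))
    go func() {
        log.Error(context.Background(), "%v", http.ListenAndServe(":8888", nil))
    }()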

View file

@@ -2,184 +2,40 @@ package sink
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
 )
 
-var sinkMessageSize = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "sink",
-        Name:      "message_size_bytes",
-        Help:      "A histogram displaying the size of each message in bytes.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-)
-
-func RecordMessageSize(size float64) {
-    sinkMessageSize.Observe(size)
-}
-
-var sinkWrittenMessages = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "sink",
-        Name:      "messages_written",
-        Help:      "A counter displaying the total number of all written messages.",
-    },
-)
-
-func IncreaseWrittenMessages() {
-    sinkWrittenMessages.Inc()
-}
-
-var sinkTotalMessages = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "sink",
-        Name:      "messages_total",
-        Help:      "A counter displaying the total number of all processed messages.",
-    },
-)
-
-func IncreaseTotalMessages() {
-    sinkTotalMessages.Inc()
-}
-
-var sinkBatchSize = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "sink",
-        Name:      "batch_size_bytes",
-        Help:      "A histogram displaying the size of each batch in bytes.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-)
-
-func RecordBatchSize(size float64) {
-    sinkBatchSize.Observe(size)
-}
-
-var sinkTotalBatches = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "sink",
-        Name:      "batches_total",
-        Help:      "A counter displaying the total number of all written batches.",
-    },
-)
-
-func IncreaseTotalBatches() {
-    sinkTotalBatches.Inc()
-}
-
-var sinkWrittenBytes = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "sink",
-        Name:      "written_bytes",
-        Help:      "A histogram displaying the size of buffer in bytes written to session file.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordWrittenBytes(size float64, fileType string) {
-    if size == 0 {
-        return
-    }
-    sinkWrittenBytes.WithLabelValues(fileType).Observe(size)
-    IncreaseTotalWrittenBytes(size, fileType)
-}
-
-var sinkTotalWrittenBytes = prometheus.NewCounterVec(
-    prometheus.CounterOpts{
-        Namespace: "sink",
-        Name:      "written_bytes_total",
-        Help:      "A counter displaying the total number of bytes written to all session files.",
-    },
-    []string{"file_type"},
-)
-
-func IncreaseTotalWrittenBytes(size float64, fileType string) {
-    if size == 0 {
-        return
-    }
-    sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size)
-}
-
-var sinkCachedAssets = prometheus.NewGauge(
-    prometheus.GaugeOpts{
-        Namespace: "sink",
-        Name:      "assets_cached",
-        Help:      "A gauge displaying the current number of cached assets.",
-    },
-)
-
-func IncreaseCachedAssets() {
-    sinkCachedAssets.Inc()
-}
-
-func DecreaseCachedAssets() {
-    sinkCachedAssets.Dec()
-}
-
-var sinkSkippedAssets = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "sink",
-        Name:      "assets_skipped",
-        Help:      "A counter displaying the total number of all skipped assets.",
-    },
-)
-
-func IncreaseSkippedAssets() {
-    sinkSkippedAssets.Inc()
-}
-
-var sinkTotalAssets = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "sink",
-        Name:      "assets_total",
-        Help:      "A counter displaying the total number of all processed assets.",
-    },
-)
-
-func IncreaseTotalAssets() {
-    sinkTotalAssets.Inc()
-}
-
-var sinkAssetSize = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "sink",
-        Name:      "asset_size_bytes",
-        Help:      "A histogram displaying the size of each asset in bytes.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-)
-
-func RecordAssetSize(size float64) {
-    sinkAssetSize.Observe(size)
-}
-
-var sinkProcessAssetDuration = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "sink",
-        Name:      "asset_process_duration_seconds",
-        Help:      "A histogram displaying the duration of processing for each asset in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-)
-
-func RecordProcessAssetDuration(durMillis float64) {
-    sinkProcessAssetDuration.Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        sinkMessageSize,
-        sinkWrittenMessages,
-        sinkTotalMessages,
-        sinkBatchSize,
-        sinkTotalBatches,
-        sinkWrittenBytes,
-        sinkTotalWrittenBytes,
-        sinkCachedAssets,
-        sinkSkippedAssets,
-        sinkTotalAssets,
-        sinkAssetSize,
-        sinkProcessAssetDuration,
-    }
-}
+type Sink interface {
+    RecordMessageSize(size float64)
+    IncreaseWrittenMessages()
+    IncreaseTotalMessages()
+    RecordBatchSize(size float64)
+    IncreaseTotalBatches()
+    RecordWrittenBytes(size float64, fileType string)
+    IncreaseTotalWrittenBytes(size float64, fileType string)
+    IncreaseCachedAssets()
+    DecreaseCachedAssets()
+    IncreaseSkippedAssets()
+    IncreaseTotalAssets()
+    RecordAssetSize(size float64)
+    RecordProcessAssetDuration(durMillis float64)
+    List() []prometheus.Collector
+}
+
+type sinkImpl struct{}
+
+func New(serviceName string) Sink { return &sinkImpl{} }
+
+func (s *sinkImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (s *sinkImpl) RecordMessageSize(size float64) {}
+func (s *sinkImpl) IncreaseWrittenMessages() {}
+func (s *sinkImpl) IncreaseTotalMessages() {}
+func (s *sinkImpl) RecordBatchSize(size float64) {}
+func (s *sinkImpl) IncreaseTotalBatches() {}
+func (s *sinkImpl) RecordWrittenBytes(size float64, fileType string) {}
+func (s *sinkImpl) IncreaseTotalWrittenBytes(size float64, fileType string) {}
+func (s *sinkImpl) IncreaseCachedAssets() {}
+func (s *sinkImpl) DecreaseCachedAssets() {}
+func (s *sinkImpl) IncreaseSkippedAssets() {}
+func (s *sinkImpl) IncreaseTotalAssets() {}
+func (s *sinkImpl) RecordAssetSize(size float64) {}
+func (s *sinkImpl) RecordProcessAssetDuration(durMillis float64) {}

View file

@@ -2,148 +2,34 @@ package spot
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
 )
 
-var spotOriginalVideoSize = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "spot",
-        Name:      "original_video_size_bytes",
-        Help:      "A histogram displaying the size of each original video in bytes.",
-        Buckets:   common.VideoSizeBuckets,
-    },
-)
-
-func RecordOriginalVideoSize(size float64) {
-    spotOriginalVideoSize.Observe(size)
-}
-
-var spotCroppedVideoSize = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "spot",
-        Name:      "cropped_video_size_bytes",
-        Help:      "A histogram displaying the size of each cropped video in bytes.",
-        Buckets:   common.VideoSizeBuckets,
-    },
-)
-
-func RecordCroppedVideoSize(size float64) {
-    spotCroppedVideoSize.Observe(size)
-}
-
-var spotVideosTotal = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "spot",
-        Name:      "videos_total",
-        Help:      "A counter displaying the total number of all processed videos.",
-    },
-)
-
-func IncreaseVideosTotal() {
-    spotVideosTotal.Inc()
-}
-
-var spotVideosCropped = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "spot",
-        Name:      "videos_cropped_total",
-        Help:      "A counter displaying the total number of all cropped videos.",
-    },
-)
-
-func IncreaseVideosCropped() {
-    spotVideosCropped.Inc()
-}
-
-var spotVideosTranscoded = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "spot",
-        Name:      "videos_transcoded_total",
-        Help:      "A counter displaying the total number of all transcoded videos.",
-    },
-)
-
-func IncreaseVideosTranscoded() {
-    spotVideosTranscoded.Inc()
-}
-
-var spotOriginalVideoDownloadDuration = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "spot",
-        Name:      "original_video_download_duration_seconds",
-        Help:      "A histogram displaying the duration of downloading each original video in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-)
-
-func RecordOriginalVideoDownloadDuration(durMillis float64) {
-    spotOriginalVideoDownloadDuration.Observe(durMillis / 1000.0)
-}
-
-var spotCroppingDuration = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "spot",
-        Name:      "cropping_duration_seconds",
-        Help:      "A histogram displaying the duration of cropping each video in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-)
-
-func RecordCroppingDuration(durMillis float64) {
-    spotCroppingDuration.Observe(durMillis / 1000.0)
-}
-
-var spotCroppedVideoUploadDuration = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "spot",
-        Name:      "cropped_video_upload_duration_seconds",
-        Help:      "A histogram displaying the duration of uploading each cropped video in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-)
-
-func RecordCroppedVideoUploadDuration(durMillis float64) {
-    spotCroppedVideoUploadDuration.Observe(durMillis / 1000.0)
-}
-
-var spotTranscodingDuration = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "spot",
-        Name:      "transcoding_duration_seconds",
-        Help:      "A histogram displaying the duration of transcoding each video in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-)
-
-func RecordTranscodingDuration(durMillis float64) {
-    spotTranscodingDuration.Observe(durMillis / 1000.0)
-}
-
-var spotTranscodedVideoUploadDuration = prometheus.NewHistogram(
-    prometheus.HistogramOpts{
-        Namespace: "spot",
-        Name:      "transcoded_video_upload_duration_seconds",
-        Help:      "A histogram displaying the duration of uploading each transcoded video in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-)
-
-func RecordTranscodedVideoUploadDuration(durMillis float64) {
-    spotTranscodedVideoUploadDuration.Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        spotOriginalVideoSize,
-        spotCroppedVideoSize,
-        spotVideosTotal,
-        spotVideosCropped,
-        spotVideosTranscoded,
-        spotOriginalVideoDownloadDuration,
-        spotCroppingDuration,
-        spotCroppedVideoUploadDuration,
-        spotTranscodingDuration,
-        spotTranscodedVideoUploadDuration,
-    }
-}
+type Spot interface {
+    RecordOriginalVideoSize(size float64)
+    RecordCroppedVideoSize(size float64)
+    IncreaseVideosTotal()
+    IncreaseVideosCropped()
+    IncreaseVideosTranscoded()
+    RecordOriginalVideoDownloadDuration(durMillis float64)
+    RecordCroppingDuration(durMillis float64)
+    RecordCroppedVideoUploadDuration(durMillis float64)
+    RecordTranscodingDuration(durMillis float64)
+    RecordTranscodedVideoUploadDuration(durMillis float64)
+    List() []prometheus.Collector
+}
+
+type spotImpl struct{}
+
+func New(serviceName string) Spot { return &spotImpl{} }
+
+func (s *spotImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (s *spotImpl) RecordOriginalVideoSize(size float64) {}
+func (s *spotImpl) RecordCroppedVideoSize(size float64) {}
+func (s *spotImpl) IncreaseVideosTotal() {}
+func (s *spotImpl) IncreaseVideosCropped() {}
+func (s *spotImpl) IncreaseVideosTranscoded() {}
+func (s *spotImpl) RecordOriginalVideoDownloadDuration(durMillis float64) {}
+func (s *spotImpl) RecordCroppingDuration(durMillis float64) {}
+func (s *spotImpl) RecordCroppedVideoUploadDuration(durMillis float64) {}
+func (s *spotImpl) RecordTranscodingDuration(durMillis float64) {}
+func (s *spotImpl) RecordTranscodedVideoUploadDuration(durMillis float64) {}

View file

@@ -2,154 +2,34 @@ package storage
 import (
     "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
 )
 
-var storageSessionSize = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "session_size_bytes",
-        Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionSize(fileSize float64, fileType string) {
-    storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
-}
-
-var storageTotalSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "storage",
-        Name:      "sessions_total",
-        Help:      "A counter displaying the total number of all processed sessions.",
-    },
-)
-
-func IncreaseStorageTotalSessions() {
-    storageTotalSessions.Inc()
-}
-
-var storageSkippedSessionSize = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "session_size_bytes",
-        Help:      "A histogram displaying the size of each skipped session file in bytes.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSkippedSessionSize(fileSize float64, fileType string) {
-    storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
-}
-
-var storageTotalSkippedSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "storage",
-        Name:      "sessions_skipped_total",
-        Help:      "A counter displaying the total number of all skipped sessions because of the size limits.",
-    },
-)
-
-func IncreaseStorageTotalSkippedSessions() {
-    storageTotalSkippedSessions.Inc()
-}
-
-var storageSessionReadDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "read_duration_seconds",
-        Help:      "A histogram displaying the duration of reading for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionReadDuration(durMillis float64, fileType string) {
-    storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionSortDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "sort_duration_seconds",
-        Help:      "A histogram displaying the duration of sorting for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionSortDuration(durMillis float64, fileType string) {
-    storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "encryption_duration_seconds",
-        Help:      "A histogram displaying the duration of encoding for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
-    storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionCompressDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "compress_duration_seconds",
-        Help:      "A histogram displaying the duration of compressing for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionCompressDuration(durMillis float64, fileType string) {
-    storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionUploadDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "upload_duration_seconds",
-        Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionUploadDuration(durMillis float64, fileType string) {
-    storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionCompressionRatio = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "compression_ratio",
-        Help:      "A histogram displaying the compression ratio of mob files for each session.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionCompressionRatio(ratio float64, fileType string) {
-    storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        storageSessionSize,
-        storageTotalSessions,
-        storageSessionReadDuration,
-        storageSessionSortDuration,
-        storageSessionEncryptionDuration,
-        storageSessionCompressDuration,
-        storageSessionUploadDuration,
-        storageSessionCompressionRatio,
-    }
-}
+type Storage interface {
+    RecordSessionSize(fileSize float64, fileType string)
+    IncreaseStorageTotalSessions()
+    RecordSkippedSessionSize(fileSize float64, fileType string)
+    IncreaseStorageTotalSkippedSessions()
+    RecordSessionReadDuration(durMillis float64, fileType string)
+    RecordSessionSortDuration(durMillis float64, fileType string)
+    RecordSessionEncryptionDuration(durMillis float64, fileType string)
+    RecordSessionCompressDuration(durMillis float64, fileType string)
+    RecordSessionUploadDuration(durMillis float64, fileType string)
+    RecordSessionCompressionRatio(ratio float64, fileType string)
+    List() []prometheus.Collector
+}
+
+type storageImpl struct{}
+
+func New(serviceName string) Storage { return &storageImpl{} }
+
+func (s *storageImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (s *storageImpl) RecordSessionSize(fileSize float64, fileType string) {}
+func (s *storageImpl) IncreaseStorageTotalSessions() {}
+func (s *storageImpl) RecordSkippedSessionSize(fileSize float64, fileType string) {}
+func (s *storageImpl) IncreaseStorageTotalSkippedSessions() {}
+func (s *storageImpl) RecordSessionReadDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionSortDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionEncryptionDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionCompressDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionUploadDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionCompressionRatio(ratio float64, fileType string) {}
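
RecordSessionCompressionRatio takes a precomputed ratio, so the caller fixes the definition. A hedged sketch assuming ratio = original size / compressed size:

    func recordCompression(m storage.Storage, originalSize, compressedSize int, fileType string) {
        if compressedSize == 0 {
            return // guard against division by zero for empty outputs
        }
        m.RecordSessionCompressionRatio(float64(originalSize)/float64(compressedSize), fileType)
    }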

View file

@@ -1,155 +0,0 @@
-package videostorage
-
-import (
-    "github.com/prometheus/client_golang/prometheus"
-    "openreplay/backend/pkg/metrics/common"
-)
-
-var storageSessionSize = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "session_size_bytes",
-        Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionSize(fileSize float64, fileType string) {
-    storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
-}
-
-var storageTotalSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "storage",
-        Name:      "sessions_total",
-        Help:      "A counter displaying the total number of all processed sessions.",
-    },
-)
-
-func IncreaseStorageTotalSessions() {
-    storageTotalSessions.Inc()
-}
-
-var storageSkippedSessionSize = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "session_size_bytes",
-        Help:      "A histogram displaying the size of each skipped session file in bytes.",
-        Buckets:   common.DefaultSizeBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSkippedSessionSize(fileSize float64, fileType string) {
-    storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
-}
-
-var storageTotalSkippedSessions = prometheus.NewCounter(
-    prometheus.CounterOpts{
-        Namespace: "storage",
-        Name:      "sessions_skipped_total",
-        Help:      "A counter displaying the total number of all skipped sessions because of the size limits.",
-    },
-)
-
-func IncreaseStorageTotalSkippedSessions() {
-    storageTotalSkippedSessions.Inc()
-}
-
-var storageSessionReadDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "read_duration_seconds",
-        Help:      "A histogram displaying the duration of reading for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionReadDuration(durMillis float64, fileType string) {
-    storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionSortDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "sort_duration_seconds",
-        Help:      "A histogram displaying the duration of sorting for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionSortDuration(durMillis float64, fileType string) {
-    storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "encryption_duration_seconds",
-        Help:      "A histogram displaying the duration of encoding for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
-    storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionCompressDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "compress_duration_seconds",
-        Help:      "A histogram displaying the duration of compressing for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionCompressDuration(durMillis float64, fileType string) {
-    storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionUploadDuration = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "upload_duration_seconds",
-        Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionUploadDuration(durMillis float64, fileType string) {
-    storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionCompressionRatio = prometheus.NewHistogramVec(
-    prometheus.HistogramOpts{
-        Namespace: "storage",
-        Name:      "compression_ratio",
-        Help:      "A histogram displaying the compression ratio of mob files for each session.",
-        Buckets:   common.DefaultDurationBuckets,
-    },
-    []string{"file_type"},
-)
-
-func RecordSessionCompressionRatio(ratio float64, fileType string) {
-    storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
-}
-
-func List() []prometheus.Collector {
-    return []prometheus.Collector{
-        storageSessionSize,
-        storageTotalSessions,
-        storageSessionReadDuration,
-        storageSessionSortDuration,
-        storageSessionEncryptionDuration,
-        storageSessionCompressDuration,
-        storageSessionUploadDuration,
-        storageSessionCompressionRatio,
-    }
-}

View file

@@ -1,11 +1,7 @@
 package web

 import (
-	"strconv"
-
 	"github.com/prometheus/client_golang/prometheus"
-
-	"openreplay/backend/pkg/metrics/common"
 )

 type Web interface {
@@ -15,70 +11,11 @@ type Web interface {
 	List() []prometheus.Collector
 }

-type webImpl struct {
-	httpRequestSize     *prometheus.HistogramVec
-	httpRequestDuration *prometheus.HistogramVec
-	httpTotalRequests   prometheus.Counter
-}
+type webImpl struct{}

-func New(serviceName string) Web {
-	return &webImpl{
-		httpRequestSize:     newRequestSizeMetric(serviceName),
-		httpRequestDuration: newRequestDurationMetric(serviceName),
-		httpTotalRequests:   newTotalRequestsMetric(serviceName),
-	}
-}
+func New(serviceName string) Web { return &webImpl{} }

-func (w *webImpl) List() []prometheus.Collector {
-	return []prometheus.Collector{
-		w.httpRequestSize,
-		w.httpRequestDuration,
-		w.httpTotalRequests,
-	}
-}
+func (w *webImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (w *webImpl) RecordRequestSize(size float64, url string, code int) {}
+func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {}
+func (w *webImpl) IncreaseTotalRequests() {}

-func newRequestSizeMetric(serviceName string) *prometheus.HistogramVec {
-	return prometheus.NewHistogramVec(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "request_size_bytes",
-			Help:      "A histogram displaying the size of each HTTP request in bytes.",
-			Buckets:   common.DefaultSizeBuckets,
-		},
-		[]string{"url", "response_code"},
-	)
-}
-
-func (w *webImpl) RecordRequestSize(size float64, url string, code int) {
-	w.httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
-}
-
-func newRequestDurationMetric(serviceName string) *prometheus.HistogramVec {
-	return prometheus.NewHistogramVec(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "request_duration_seconds",
-			Help:      "A histogram displaying the duration of each HTTP request in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-		[]string{"url", "response_code"},
-	)
-}
-
-func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {
-	w.httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
-}
-
-func newTotalRequestsMetric(serviceName string) prometheus.Counter {
-	return prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: serviceName,
-			Name:      "requests_total",
-			Help:      "A counter displaying the number all HTTP requests.",
-		},
-	)
-}
-
-func (w *webImpl) IncreaseTotalRequests() {
-	w.httpTotalRequests.Inc()
-}

View file

@@ -11,6 +11,8 @@ import (
 	"openreplay/backend/pkg/metrics/database"
 )

+var ErrDisabledCache = errors.New("cache is disabled")
+
 type Cache interface {
 	Set(project *Project) error
 	GetByID(projectID uint32) (*Project, error)
@@ -18,10 +20,16 @@ type Cache interface {
 }

 type cacheImpl struct {
-	db *redis.Client
+	db      *redis.Client
+	metrics database.Database
 }

-var ErrDisabledCache = errors.New("cache is disabled")
+func NewCache(db *redis.Client, metrics database.Database) Cache {
+	return &cacheImpl{
+		db:      db,
+		metrics: metrics,
+	}
+}

 func (c *cacheImpl) Set(project *Project) error {
 	if c.db == nil {
@@ -38,8 +46,8 @@ func (c *cacheImpl) Set(project *Project) error {
 	if _, err = c.db.Redis.Set(fmt.Sprintf("project:key:%s", project.ProjectKey), projectBytes, time.Minute*10).Result(); err != nil {
 		return err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
-	database.IncreaseRedisRequests("set", "project")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
+	c.metrics.IncreaseRedisRequests("set", "project")
 	return nil
 }

@@ -52,8 +60,8 @@ func (c *cacheImpl) GetByID(projectID uint32) (*Project, error) {
 	if err != nil {
 		return nil, err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
-	database.IncreaseRedisRequests("get", "project")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
+	c.metrics.IncreaseRedisRequests("get", "project")
 	project := &Project{}
 	if err = json.Unmarshal([]byte(result), project); err != nil {
 		return nil, err
@@ -70,15 +78,11 @@ func (c *cacheImpl) GetByKey(projectKey string) (*Project, error) {
 	if err != nil {
 		return nil, err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
-	database.IncreaseRedisRequests("get", "project")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
+	c.metrics.IncreaseRedisRequests("get", "project")
 	project := &Project{}
 	if err = json.Unmarshal([]byte(result), project); err != nil {
 		return nil, err
 	}
 	return project, nil
 }
-
-func NewCache(db *redis.Client) Cache {
-	return &cacheImpl{db: db}
-}

View file

@@ -3,6 +3,7 @@ package projects
 import (
 	"context"
 	"errors"
+	"openreplay/backend/pkg/metrics/database"
 	"time"

 	"openreplay/backend/pkg/cache"
@@ -24,8 +25,8 @@ type projectsImpl struct {
 	projectsByKeys cache.Cache
 }

-func New(log logger.Logger, db pool.Pool, redis *redis.Client) Projects {
-	cl := NewCache(redis)
+func New(log logger.Logger, db pool.Pool, redis *redis.Client, metrics database.Database) Projects {
+	cl := NewCache(redis, metrics)
 	return &projectsImpl{
 		log: log,
 		db:  db,

View file

@@ -2,6 +2,7 @@ package tracer
 import (
 	"net/http"
+	"openreplay/backend/pkg/metrics/database"

 	db "openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/logger"
@@ -14,7 +15,7 @@ type Tracer interface {

 type tracerImpl struct{}

-func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
+func NewTracer(log logger.Logger, conn db.Pool, metrics database.Database) (Tracer, error) {
 	return &tracerImpl{}, nil
 }

View file

@@ -3,6 +3,7 @@ package sessions
 import (
 	"errors"
 	"openreplay/backend/pkg/db/redis"
+	"openreplay/backend/pkg/metrics/database"
 )

 type cacheImpl struct{}
@@ -25,6 +26,6 @@ func (c *cacheImpl) Get(sessionID uint64) (*Session, error) {

 var ErrDisabledCache = errors.New("cache is disabled")

-func NewCache(db *redis.Client) Cache {
+func NewCache(db *redis.Client, metrics database.Database) Cache {
 	return &cacheImpl{}
 }

View file

@@ -3,6 +3,7 @@ package sessions
 import (
 	"context"
 	"fmt"
+	"openreplay/backend/pkg/metrics/database"

 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/db/redis"
@@ -38,12 +39,12 @@ type sessionsImpl struct {
 	projects projects.Projects
 }

-func New(log logger.Logger, db pool.Pool, proj projects.Projects, redis *redis.Client) Sessions {
+func New(log logger.Logger, db pool.Pool, proj projects.Projects, redis *redis.Client, metrics database.Database) Sessions {
 	return &sessionsImpl{
 		log:      log,
-		cache:    NewInMemoryCache(log, NewCache(redis)),
+		cache:    NewInMemoryCache(log, NewCache(redis, metrics)),
 		storage:  NewStorage(db),
-		updates:  NewSessionUpdates(log, db),
+		updates:  NewSessionUpdates(log, db, metrics),
 		projects: proj,
 	}
 }

View file

@@ -27,13 +27,15 @@ type updatesImpl struct {
 	log     logger.Logger
 	db      pool.Pool
 	updates map[uint64]*sessionUpdate
+	metrics database.Database
 }

-func NewSessionUpdates(log logger.Logger, db pool.Pool) Updates {
+func NewSessionUpdates(log logger.Logger, db pool.Pool, metrics database.Database) Updates {
 	return &updatesImpl{
 		log:     log,
 		db:      db,
 		updates: make(map[uint64]*sessionUpdate),
+		metrics: metrics,
 	}
 }

@@ -94,7 +96,7 @@ func (u *updatesImpl) Commit() {
 		}
 	}
 	// Record batch size
-	database.RecordBatchElements(float64(b.Len()))
+	u.metrics.RecordBatchElements(float64(b.Len()))

 	start := time.Now()

@@ -121,7 +123,7 @@ func (u *updatesImpl) Commit() {
 			}
 		}
 	}
-	database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
+	u.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
 	u.updates = make(map[uint64]*sessionUpdate)
 }

View file

@@ -1,12 +1,14 @@
 package spot

 import (
+	"openreplay/backend/pkg/metrics/database"
 	"time"

 	"openreplay/backend/internal/config/spot"
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/flakeid"
 	"openreplay/backend/pkg/logger"
+	spotMetrics "openreplay/backend/pkg/metrics/spot"
 	"openreplay/backend/pkg/metrics/web"
 	"openreplay/backend/pkg/objectstorage/store"
 	"openreplay/backend/pkg/server/api"
@@ -26,16 +28,16 @@ type ServicesBuilder struct {
 	SpotsAPI api.Handlers
 }

-func NewServiceBuilder(log logger.Logger, cfg *spot.Config, webMetrics web.Web, pgconn pool.Pool, prefix string) (*ServicesBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *spot.Config, webMetrics web.Web, spotMetrics spotMetrics.Spot, dbMetrics database.Database, pgconn pool.Pool, prefix string) (*ServicesBuilder, error) {
 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
 		return nil, err
 	}
 	flaker := flakeid.NewFlaker(cfg.WorkerID)
 	spots := service.NewSpots(log, pgconn, flaker)
-	transcoder := transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots)
+	transcoder := transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots, spotMetrics)
 	keys := keys.NewKeys(log, pgconn)
-	auditrail, err := tracer.NewTracer(log, pgconn)
+	auditrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
 	if err != nil {
 		return nil, err
 	}

View file

@@ -16,7 +16,7 @@ import (
 	"openreplay/backend/internal/config/spot"
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/logger"
-	metrics "openreplay/backend/pkg/metrics/spot"
+	spotMetrics "openreplay/backend/pkg/metrics/spot"
 	"openreplay/backend/pkg/objectstorage"
 	workers "openreplay/backend/pkg/pool"
 	"openreplay/backend/pkg/spot/service"
@@ -39,9 +39,10 @@ type transcoderImpl struct {
 	spots            service.Spots
 	prepareWorkers   workers.WorkerPool
 	transcodeWorkers workers.WorkerPool
+	metrics          spotMetrics.Spot
 }

-func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, conn pool.Pool, spots service.Spots) Transcoder {
+func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, conn pool.Pool, spots service.Spots, metrics spotMetrics.Spot) Transcoder {
 	tnsc := &transcoderImpl{
 		cfg: cfg,
 		log: log,
@@ -114,7 +115,7 @@ func (t *transcoderImpl) doneTask(task *Task) {
 }

 func (t *transcoderImpl) process(task *Task) {
-	metrics.IncreaseVideosTotal()
+	t.metrics.IncreaseVideosTotal()
 	//spotID := task.SpotID
 	t.log.Info(context.Background(), "Processing spot %s", task.SpotID)

@@ -200,11 +201,11 @@ func (t *transcoderImpl) downloadSpotVideo(spotID uint64, path string) error {
 	if fileInfo, err := originVideo.Stat(); err != nil {
 		t.log.Error(context.Background(), "Failed to get file info: %v", err)
 	} else {
-		metrics.RecordOriginalVideoSize(float64(fileInfo.Size()))
+		t.metrics.RecordOriginalVideoSize(float64(fileInfo.Size()))
 	}
 	originVideo.Close()

-	metrics.RecordOriginalVideoDownloadDuration(time.Since(start).Seconds())
+	t.metrics.RecordOriginalVideoDownloadDuration(time.Since(start).Seconds())
 	t.log.Info(context.Background(), "Saved origin video to disk, spot: %d in %v sec", spotID, time.Since(start).Seconds())

 	return nil
@@ -227,8 +228,8 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
 	if err != nil {
 		return fmt.Errorf("failed to execute command: %v, stderr: %v", err, stderr.String())
 	}
-	metrics.IncreaseVideosCropped()
-	metrics.RecordCroppingDuration(time.Since(start).Seconds())
+	t.metrics.IncreaseVideosCropped()
+	t.metrics.RecordCroppingDuration(time.Since(start).Seconds())
 	t.log.Info(context.Background(), "Cropped spot %d in %v", spotID, time.Since(start).Seconds())

@@ -246,7 +247,7 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
 	if fileInfo, err := video.Stat(); err != nil {
 		t.log.Error(context.Background(), "Failed to get file info: %v", err)
 	} else {
-		metrics.RecordCroppedVideoSize(float64(fileInfo.Size()))
+		t.metrics.RecordCroppedVideoSize(float64(fileInfo.Size()))
 	}

 	err = t.objStorage.Upload(video, fmt.Sprintf("%d/video.webm", spotID), "video/webm", objectstorage.NoContentEncoding, objectstorage.NoCompression)
@@ -254,7 +255,7 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
 		return fmt.Errorf("failed to upload cropped video: %v", err)
 	}

-	metrics.RecordCroppedVideoUploadDuration(time.Since(start).Seconds())
+	t.metrics.RecordCroppedVideoUploadDuration(time.Since(start).Seconds())
 	t.log.Info(context.Background(), "Uploaded cropped spot %d in %v", spotID, time.Since(start).Seconds())

 	return nil
@@ -279,8 +280,8 @@ func (t *transcoderImpl) transcodeSpotVideo(spotID uint64, path string) (string,
 		t.log.Error(context.Background(), "Failed to execute command: %v, stderr: %v", err, stderr.String())
 		return "", err
 	}
-	metrics.IncreaseVideosTranscoded()
-	metrics.RecordTranscodingDuration(time.Since(start).Seconds())
+	t.metrics.IncreaseVideosTranscoded()
+	t.metrics.RecordTranscodingDuration(time.Since(start).Seconds())
 	t.log.Info(context.Background(), "Transcoded spot %d in %v", spotID, time.Since(start).Seconds())

 	start = time.Now()
@@ -327,7 +328,7 @@ func (t *transcoderImpl) transcodeSpotVideo(spotID uint64, path string) (string,
 			return "", err
 		}
 	}
-	metrics.RecordTranscodedVideoUploadDuration(time.Since(start).Seconds())
+	t.metrics.RecordTranscodedVideoUploadDuration(time.Since(start).Seconds())
 	t.log.Info(context.Background(), "Uploaded chunks for spot %d in %v", spotID, time.Since(start).Seconds())

 	return strings.Join(lines, "\n"), nil

View file

@@ -0,0 +1,106 @@
package assets
import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
type Assets interface {
IncreaseProcessesSessions()
IncreaseSavedSessions()
RecordDownloadDuration(durMillis float64, code int)
RecordUploadDuration(durMillis float64, isFailed bool)
List() []prometheus.Collector
}
type assetsImpl struct {
assetsProcessedSessions prometheus.Counter
assetsSavedSessions prometheus.Counter
assetsDownloadDuration *prometheus.HistogramVec
assetsUploadDuration *prometheus.HistogramVec
}
func New(serviceName string) Assets {
return &assetsImpl{
assetsProcessedSessions: newProcessedSessions(serviceName),
assetsSavedSessions: newSavedSessions(serviceName),
assetsDownloadDuration: newDownloadDuration(serviceName),
assetsUploadDuration: newUploadDuration(serviceName),
}
}
func (a *assetsImpl) List() []prometheus.Collector {
return []prometheus.Collector{
a.assetsProcessedSessions,
a.assetsSavedSessions,
a.assetsDownloadDuration,
a.assetsUploadDuration,
}
}
func newProcessedSessions(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "processed_total",
Help: "A counter displaying the total count of processed assets.",
},
)
}
func (a *assetsImpl) IncreaseProcessesSessions() {
a.assetsProcessedSessions.Inc()
}
func newSavedSessions(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "saved_total",
Help: "A counter displaying the total number of cached assets.",
},
)
}
func (a *assetsImpl) IncreaseSavedSessions() {
a.assetsSavedSessions.Inc()
}
func newDownloadDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "download_duration_seconds",
Help: "A histogram displaying the duration of downloading for each asset in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"response_code"},
)
}
func (a *assetsImpl) RecordDownloadDuration(durMillis float64, code int) {
a.assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
}
func newUploadDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "upload_s3_duration_seconds",
Help: "A histogram displaying the duration of uploading to s3 for each asset in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"failed"},
)
}
func (a *assetsImpl) RecordUploadDuration(durMillis float64, isFailed bool) {
failed := "false"
if isFailed {
failed = "true"
}
a.assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
}
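A minimal usage sketch (not part of the diff above): timing an asset download and feeding the result into these collectors. The fetchAsset helper and the example URL are hypothetical; the Assets interface and its methods are taken from the file shown here.

package main

import (
	"net/http"
	"time"

	"openreplay/backend/pkg/metrics/assets"
)

// fetchAsset downloads one asset and records how long it took, labeled by
// the HTTP status code (0 if the request never produced a response).
func fetchAsset(m assets.Assets, url string) error {
	start := time.Now()
	resp, err := http.Get(url)
	code := 0
	if resp != nil {
		code = resp.StatusCode
		resp.Body.Close()
	}
	// Record* methods take milliseconds; the package converts to seconds.
	m.RecordDownloadDuration(float64(time.Since(start).Milliseconds()), code)
	return err
}

func main() {
	m := assets.New("assets")
	_ = fetchAsset(m, "https://example.com/style.css")
}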

View file

@@ -0,0 +1,193 @@
package canvas
import (
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
type Canvas interface {
RecordCanvasImageSize(size float64)
IncreaseTotalSavedImages()
RecordImagesPerCanvas(number float64)
RecordCanvasesPerSession(number float64)
RecordPreparingDuration(duration float64)
IncreaseTotalCreatedArchives()
RecordArchivingDuration(duration float64)
RecordArchiveSize(size float64)
RecordUploadingDuration(duration float64)
List() []prometheus.Collector
}
type canvasImpl struct {
canvasesImageSize prometheus.Histogram
canvasesTotalSavedImages prometheus.Counter
canvasesImagesPerCanvas prometheus.Histogram
canvasesCanvasesPerSession prometheus.Histogram
canvasesPreparingDuration prometheus.Histogram
canvasesTotalCreatedArchives prometheus.Counter
canvasesArchivingDuration prometheus.Histogram
canvasesArchiveSize prometheus.Histogram
canvasesUploadingDuration prometheus.Histogram
}
func New(serviceName string) Canvas {
return &canvasImpl{
canvasesImageSize: newImageSizeMetric(serviceName),
canvasesTotalSavedImages: newTotalSavedImages(serviceName),
canvasesImagesPerCanvas: newImagesPerCanvas(serviceName),
canvasesCanvasesPerSession: newCanvasesPerSession(serviceName),
canvasesPreparingDuration: newPreparingDuration(serviceName),
canvasesTotalCreatedArchives: newTotalCreatedArchives(serviceName),
canvasesArchivingDuration: newArchivingDuration(serviceName),
canvasesArchiveSize: newArchiveSize(serviceName),
canvasesUploadingDuration: newUploadingDuration(serviceName),
}
}
func (c *canvasImpl) List() []prometheus.Collector {
return []prometheus.Collector{
c.canvasesImageSize,
c.canvasesTotalSavedImages,
c.canvasesImagesPerCanvas,
c.canvasesCanvasesPerSession,
c.canvasesPreparingDuration,
c.canvasesTotalCreatedArchives,
c.canvasesArchivingDuration,
c.canvasesArchiveSize,
c.canvasesUploadingDuration,
}
}
func newImageSizeMetric(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "image_size_bytes",
Help: "A histogram displaying the size of each canvas image in bytes.",
Buckets: common.DefaultSizeBuckets,
},
)
}
func (c *canvasImpl) RecordCanvasImageSize(size float64) {
c.canvasesImageSize.Observe(size)
}
func newTotalSavedImages(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "total_saved_images",
Help: "A counter displaying the total number of saved images.",
},
)
}
func (c *canvasImpl) IncreaseTotalSavedImages() {
c.canvasesTotalSavedImages.Inc()
}
func newImagesPerCanvas(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "images_per_canvas",
Help: "A histogram displaying the number of images per canvas.",
Buckets: common.DefaultBuckets,
},
)
}
func (c *canvasImpl) RecordImagesPerCanvas(number float64) {
c.canvasesImagesPerCanvas.Observe(number)
}
func newCanvasesPerSession(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "canvases_per_session",
Help: "A histogram displaying the number of canvases per session.",
Buckets: common.DefaultBuckets,
},
)
}
func (c *canvasImpl) RecordCanvasesPerSession(number float64) {
c.canvasesCanvasesPerSession.Observe(number)
}
func newPreparingDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "preparing_duration_seconds",
Help: "A histogram displaying the duration of preparing the list of canvases for each session in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (c *canvasImpl) RecordPreparingDuration(duration float64) {
c.canvasesPreparingDuration.Observe(duration)
}
func newTotalCreatedArchives(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "total_created_archives",
Help: "A counter displaying the total number of created canvas archives.",
},
)
}
func (c *canvasImpl) IncreaseTotalCreatedArchives() {
c.canvasesTotalCreatedArchives.Inc()
}
func newArchivingDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "archiving_duration_seconds",
Help: "A histogram displaying the duration of archiving for each canvas in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (c *canvasImpl) RecordArchivingDuration(duration float64) {
c.canvasesArchivingDuration.Observe(duration)
}
func newArchiveSize(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "archive_size_bytes",
Help: "A histogram displaying the size of each canvas archive in bytes.",
Buckets: common.DefaultSizeBuckets,
},
)
}
func (c *canvasImpl) RecordArchiveSize(size float64) {
c.canvasesArchiveSize.Observe(size)
}
func newUploadingDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "uploading_duration_seconds",
Help: "A histogram displaying the duration of uploading for each canvas in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (c *canvasImpl) RecordUploadingDuration(duration float64) {
c.canvasesUploadingDuration.Observe(duration)
}

View file

@@ -0,0 +1,200 @@
package database
import (
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
type Database interface {
RecordBatchElements(number float64)
RecordBatchInsertDuration(durMillis float64)
RecordBulkSize(size float64, db, table string)
RecordBulkElements(size float64, db, table string)
RecordBulkInsertDuration(durMillis float64, db, table string)
RecordRequestDuration(durMillis float64, method, table string)
IncreaseTotalRequests(method, table string)
IncreaseRedisRequests(method, table string)
RecordRedisRequestDuration(durMillis float64, method, table string)
List() []prometheus.Collector
}
type databaseImpl struct {
dbBatchElements prometheus.Histogram
dbBatchInsertDuration prometheus.Histogram
dbBulkSize *prometheus.HistogramVec
dbBulkElements *prometheus.HistogramVec
dbBulkInsertDuration *prometheus.HistogramVec
dbRequestDuration *prometheus.HistogramVec
dbTotalRequests *prometheus.CounterVec
cacheRedisRequests *prometheus.CounterVec
cacheRedisRequestDuration *prometheus.HistogramVec
}
func New(serviceName string) Database {
return &databaseImpl{
dbBatchElements: newBatchElements(serviceName),
dbBatchInsertDuration: newBatchInsertDuration(serviceName),
dbBulkSize: newBulkSize(serviceName),
dbBulkElements: newBulkElements(serviceName),
dbBulkInsertDuration: newBulkInsertDuration(serviceName),
dbRequestDuration: newRequestDuration(serviceName),
dbTotalRequests: newTotalRequests(serviceName),
cacheRedisRequests: newRedisRequests(serviceName),
cacheRedisRequestDuration: newRedisRequestDuration(serviceName),
}
}
func (d *databaseImpl) List() []prometheus.Collector {
return []prometheus.Collector{
d.dbBatchElements,
d.dbBatchInsertDuration,
d.dbBulkSize,
d.dbBulkElements,
d.dbBulkInsertDuration,
d.dbRequestDuration,
d.dbTotalRequests,
d.cacheRedisRequests,
d.cacheRedisRequestDuration,
}
}
func newBatchElements(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "batch_size_elements",
Help: "A histogram displaying the number of SQL commands in each batch.",
Buckets: common.DefaultBuckets,
},
)
}
func (d *databaseImpl) RecordBatchElements(number float64) {
d.dbBatchElements.Observe(number)
}
func newBatchInsertDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "batch_insert_duration_seconds",
Help: "A histogram displaying the duration of batch inserts in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (d *databaseImpl) RecordBatchInsertDuration(durMillis float64) {
d.dbBatchInsertDuration.Observe(durMillis / 1000.0)
}
func newBulkSize(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "bulk_size_bytes",
Help: "A histogram displaying the bulk size in bytes.",
Buckets: common.DefaultSizeBuckets,
},
[]string{"db", "table"},
)
}
func (d *databaseImpl) RecordBulkSize(size float64, db, table string) {
d.dbBulkSize.WithLabelValues(db, table).Observe(size)
}
func newBulkElements(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "bulk_size_elements",
Help: "A histogram displaying the size of data set in each bulk.",
Buckets: common.DefaultBuckets,
},
[]string{"db", "table"},
)
}
func (d *databaseImpl) RecordBulkElements(size float64, db, table string) {
d.dbBulkElements.WithLabelValues(db, table).Observe(size)
}
func newBulkInsertDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "bulk_insert_duration_seconds",
Help: "A histogram displaying the duration of bulk inserts in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"db", "table"},
)
}
func (d *databaseImpl) RecordBulkInsertDuration(durMillis float64, db, table string) {
d.dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0)
}
func newRequestDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "request_duration_seconds",
Help: "A histogram displaying the duration of each sql request in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"method", "table"},
)
}
func (d *databaseImpl) RecordRequestDuration(durMillis float64, method, table string) {
d.dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
}
func newTotalRequests(serviceName string) *prometheus.CounterVec {
return prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "requests_total",
Help: "A counter showing the total number of all SQL requests.",
},
[]string{"method", "table"},
)
}
func (d *databaseImpl) IncreaseTotalRequests(method, table string) {
d.dbTotalRequests.WithLabelValues(method, table).Inc()
}
func newRedisRequests(serviceName string) *prometheus.CounterVec {
return prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "redis_requests_total",
Help: "A counter showing the total number of all Redis requests.",
},
[]string{"method", "table"},
)
}
func (d *databaseImpl) IncreaseRedisRequests(method, table string) {
d.cacheRedisRequests.WithLabelValues(method, table).Inc()
}
func newRedisRequestDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "redis_request_duration_seconds",
Help: "A histogram displaying the duration of each Redis request in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"method", "table"},
)
}
func (d *databaseImpl) RecordRedisRequestDuration(durMillis float64, method, table string) {
d.cacheRedisRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
}
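As a usage sketch of the interface above (the "select"/"sessions" labels and the run closure are illustrative, not from the commit), a caller wraps a SQL call like this:

package main

import (
	"time"

	"openreplay/backend/pkg/metrics/database"
)

// instrumentedQuery runs one SQL call and records its duration and count
// under the given method/table labels.
func instrumentedQuery(m database.Database, method, table string, run func() error) error {
	start := time.Now()
	err := run()
	// Durations are passed in milliseconds; RecordRequestDuration divides by 1000.
	m.RecordRequestDuration(float64(time.Since(start).Milliseconds()), method, table)
	m.IncreaseTotalRequests(method, table)
	return err
}

func main() {
	m := database.New("example")
	_ = instrumentedQuery(m, "select", "sessions", func() error {
		return nil // a real pgx query would go here
	})
}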

View file

@@ -0,0 +1,79 @@
package ender
import "github.com/prometheus/client_golang/prometheus"
type Ender interface {
IncreaseActiveSessions()
DecreaseActiveSessions()
IncreaseClosedSessions()
IncreaseTotalSessions()
List() []prometheus.Collector
}
type enderImpl struct {
activeSessions prometheus.Gauge
closedSessions prometheus.Counter
totalSessions prometheus.Counter
}
func New(serviceName string) Ender {
return &enderImpl{
activeSessions: newActiveSessions(serviceName),
closedSessions: newClosedSessions(serviceName),
totalSessions: newTotalSessions(serviceName),
}
}
func (e *enderImpl) List() []prometheus.Collector {
return []prometheus.Collector{
e.activeSessions,
e.closedSessions,
e.totalSessions,
}
}
func newActiveSessions(serviceName string) prometheus.Gauge {
return prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: serviceName,
Name: "sessions_active",
Help: "A gauge displaying the number of active (live) sessions.",
},
)
}
func (e *enderImpl) IncreaseActiveSessions() {
e.activeSessions.Inc()
}
func (e *enderImpl) DecreaseActiveSessions() {
e.activeSessions.Dec()
}
func newClosedSessions(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "sessions_closed",
Help: "A counter displaying the number of closed sessions (sent SessionEnd).",
},
)
}
func (e *enderImpl) IncreaseClosedSessions() {
e.closedSessions.Inc()
}
func newTotalSessions(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "sessions_total",
Help: "A counter displaying the number of all processed sessions.",
},
)
}
func (e *enderImpl) IncreaseTotalSessions() {
e.totalSessions.Inc()
}

View file

@@ -0,0 +1,41 @@
package heuristics
import (
"github.com/prometheus/client_golang/prometheus"
)
type Heuristics interface {
IncreaseTotalEvents(eventType string)
List() []prometheus.Collector
}
type heuristicsImpl struct {
totalEvents *prometheus.CounterVec
}
func New(serviceName string) Heuristics {
return &heuristicsImpl{
totalEvents: newTotalEvents(serviceName),
}
}
func newTotalEvents(serviceName string) *prometheus.CounterVec {
return prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "events_total",
Help: "A counter displaying the number of all processed events",
},
[]string{"type"},
)
}
func (h *heuristicsImpl) IncreaseTotalEvents(eventType string) {
h.totalEvents.WithLabelValues(eventType).Inc()
}
func (h *heuristicsImpl) List() []prometheus.Collector {
return []prometheus.Collector{
h.totalEvents,
}
}

View file

@@ -0,0 +1,192 @@
package images
import (
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
type Images interface {
RecordOriginalArchiveSize(size float64)
RecordOriginalArchiveExtractionDuration(duration float64)
IncreaseTotalSavedArchives()
RecordSavingImageDuration(duration float64)
IncreaseTotalSavedImages()
IncreaseTotalCreatedArchives()
RecordArchivingDuration(duration float64)
RecordArchiveSize(size float64)
RecordUploadingDuration(duration float64)
List() []prometheus.Collector
}
type imagesImpl struct {
originalArchiveSize prometheus.Histogram
originalArchiveExtractionDuration prometheus.Histogram
totalSavedArchives prometheus.Counter
savingImageDuration prometheus.Histogram
totalSavedImages prometheus.Counter
totalCreatedArchives prometheus.Counter
archivingDuration prometheus.Histogram
archiveSize prometheus.Histogram
uploadingDuration prometheus.Histogram
}
func New(serviceName string) Images {
return &imagesImpl{
originalArchiveSize: newOriginalArchiveSize(serviceName),
originalArchiveExtractionDuration: newOriginalArchiveExtractionDuration(serviceName),
totalSavedArchives: newTotalSavedArchives(serviceName),
savingImageDuration: newSavingImageDuration(serviceName),
totalSavedImages: newTotalSavedImages(serviceName),
totalCreatedArchives: newTotalCreatedArchives(serviceName),
archivingDuration: newArchivingDuration(serviceName),
archiveSize: newArchiveSize(serviceName),
uploadingDuration: newUploadingDuration(serviceName),
}
}
func (i *imagesImpl) List() []prometheus.Collector {
return []prometheus.Collector{
i.originalArchiveSize,
i.originalArchiveExtractionDuration,
i.totalSavedArchives,
i.savingImageDuration,
i.totalSavedImages,
i.totalCreatedArchives,
i.archivingDuration,
i.archiveSize,
i.uploadingDuration,
}
}
func newOriginalArchiveSize(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "original_archive_size_bytes",
Help: "A histogram displaying the original archive size in bytes.",
Buckets: common.DefaultSizeBuckets,
},
)
}
func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {
i.originalArchiveSize.Observe(size)
}
func newOriginalArchiveExtractionDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "original_archive_extraction_duration_seconds",
Help: "A histogram displaying the duration of extracting the original archive.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {
i.originalArchiveExtractionDuration.Observe(duration)
}
func newTotalSavedArchives(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "total_saved_archives",
Help: "A counter displaying the total number of saved original archives.",
},
)
}
func (i *imagesImpl) IncreaseTotalSavedArchives() {
i.totalSavedArchives.Inc()
}
func newSavingImageDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "saving_image_duration_seconds",
Help: "A histogram displaying the duration of saving each image in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (i *imagesImpl) RecordSavingImageDuration(duration float64) {
i.savingImageDuration.Observe(duration)
}
func newTotalSavedImages(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "total_saved_images",
Help: "A counter displaying the total number of saved images.",
},
)
}
func (i *imagesImpl) IncreaseTotalSavedImages() {
i.totalSavedImages.Inc()
}
func newTotalCreatedArchives(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "total_created_archives",
Help: "A counter displaying the total number of created archives.",
},
)
}
func (i *imagesImpl) IncreaseTotalCreatedArchives() {
i.totalCreatedArchives.Inc()
}
func newArchivingDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "archiving_duration_seconds",
Help: "A histogram displaying the duration of archiving each session in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (i *imagesImpl) RecordArchivingDuration(duration float64) {
i.archivingDuration.Observe(duration)
}
func newArchiveSize(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "archive_size_bytes",
Help: "A histogram displaying the session's archive size in bytes.",
Buckets: common.DefaultSizeBuckets,
},
)
}
func (i *imagesImpl) RecordArchiveSize(size float64) {
i.archiveSize.Observe(size)
}
func newUploadingDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "uploading_duration_seconds",
Help: "A histogram displaying the duration of uploading each session's archive to S3 in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (i *imagesImpl) RecordUploadingDuration(duration float64) {
i.uploadingDuration.Observe(duration)
}

View file

@@ -0,0 +1,38 @@
package metrics
import (
"context"
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/prometheus/client_golang/prometheus/promhttp"
"openreplay/backend/pkg/logger"
)
type MetricServer struct {
registry *prometheus.Registry
}
func New(log logger.Logger, cs []prometheus.Collector) {
registry := prometheus.NewRegistry()
// Add go runtime metrics and process collectors.
registry.MustRegister(
collectors.NewGoCollector(),
collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
)
// Add extra metrics
registry.MustRegister(cs...)
// Expose /metrics HTTP endpoint using the created custom registry.
http.Handle(
"/metrics", promhttp.HandlerFor(
registry,
promhttp.HandlerOpts{
EnableOpenMetrics: true,
}),
)
go func() {
log.Error(context.Background(), "%v", http.ListenAndServe(":8888", nil))
}()
}
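A quick sketch of how this endpoint behaves: collectors registered through New become scrapeable on port 8888. The "example" service name is hypothetical; the ender group is just one of the collector packages from this commit.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"

	"openreplay/backend/pkg/logger"
	"openreplay/backend/pkg/metrics"
	"openreplay/backend/pkg/metrics/ender"
)

func main() {
	log := logger.New()
	em := ender.New("example")
	metrics.New(log, em.List()) // starts serving /metrics on :8888 in a goroutine

	em.IncreaseTotalSessions()
	time.Sleep(100 * time.Millisecond) // crude wait for the listener to come up

	resp, err := http.Get("http://localhost:8888/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // output includes example_sessions_total 1
}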

View file

@@ -0,0 +1,259 @@
package sink
import (
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
type Sink interface {
RecordMessageSize(size float64)
IncreaseWrittenMessages()
IncreaseTotalMessages()
RecordBatchSize(size float64)
IncreaseTotalBatches()
RecordWrittenBytes(size float64, fileType string)
IncreaseTotalWrittenBytes(size float64, fileType string)
IncreaseCachedAssets()
DecreaseCachedAssets()
IncreaseSkippedAssets()
IncreaseTotalAssets()
RecordAssetSize(size float64)
RecordProcessAssetDuration(durMillis float64)
List() []prometheus.Collector
}
type sinkImpl struct {
messageSize prometheus.Histogram
writtenMessages prometheus.Counter
totalMessages prometheus.Counter
batchSize prometheus.Histogram
totalBatches prometheus.Counter
writtenBytes *prometheus.HistogramVec
totalWrittenBytes *prometheus.CounterVec
cachedAssets prometheus.Gauge
skippedAssets prometheus.Counter
totalAssets prometheus.Counter
assetSize prometheus.Histogram
processAssetDuration prometheus.Histogram
}
func New(serviceName string) Sink {
return &sinkImpl{
messageSize: newMessageSize(serviceName),
writtenMessages: newWrittenMessages(serviceName),
totalMessages: newTotalMessages(serviceName),
batchSize: newBatchSize(serviceName),
totalBatches: newTotalBatches(serviceName),
writtenBytes: newWrittenBytes(serviceName),
totalWrittenBytes: newTotalWrittenBytes(serviceName),
cachedAssets: newCachedAssets(serviceName),
skippedAssets: newSkippedAssets(serviceName),
totalAssets: newTotalAssets(serviceName),
assetSize: newAssetSize(serviceName),
processAssetDuration: newProcessAssetDuration(serviceName),
}
}
func (s *sinkImpl) List() []prometheus.Collector {
return []prometheus.Collector{
s.messageSize,
s.writtenMessages,
s.totalMessages,
s.batchSize,
s.totalBatches,
s.writtenBytes,
s.totalWrittenBytes,
s.cachedAssets,
s.skippedAssets,
s.totalAssets,
s.assetSize,
s.processAssetDuration,
}
}
func newMessageSize(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "message_size_bytes",
Help: "A histogram displaying the size of each message in bytes.",
Buckets: common.DefaultSizeBuckets,
},
)
}
func (s *sinkImpl) RecordMessageSize(size float64) {
s.messageSize.Observe(size)
}
func newWrittenMessages(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "messages_written",
Help: "A counter displaying the total number of all written messages.",
},
)
}
func (s *sinkImpl) IncreaseWrittenMessages() {
s.writtenMessages.Inc()
}
func newTotalMessages(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "messages_total",
Help: "A counter displaying the total number of all processed messages.",
},
)
}
func (s *sinkImpl) IncreaseTotalMessages() {
s.totalMessages.Inc()
}
func newBatchSize(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "batch_size_bytes",
Help: "A histogram displaying the size of each batch in bytes.",
Buckets: common.DefaultSizeBuckets,
},
)
}
func (s *sinkImpl) RecordBatchSize(size float64) {
s.batchSize.Observe(size)
}
func newTotalBatches(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "batches_total",
Help: "A counter displaying the total number of all written batches.",
},
)
}
func (s *sinkImpl) IncreaseTotalBatches() {
s.totalBatches.Inc()
}
func newWrittenBytes(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "written_bytes",
Help: "A histogram displaying the size of buffer in bytes written to session file.",
Buckets: common.DefaultSizeBuckets,
},
[]string{"file_type"},
)
}
func (s *sinkImpl) RecordWrittenBytes(size float64, fileType string) {
if size == 0 {
return
}
s.writtenBytes.WithLabelValues(fileType).Observe(size)
s.IncreaseTotalWrittenBytes(size, fileType)
}
func newTotalWrittenBytes(serviceName string) *prometheus.CounterVec {
return prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "written_bytes_total",
Help: "A counter displaying the total number of bytes written to all session files.",
},
[]string{"file_type"},
)
}
func (s *sinkImpl) IncreaseTotalWrittenBytes(size float64, fileType string) {
if size == 0 {
return
}
s.totalWrittenBytes.WithLabelValues(fileType).Add(size)
}
func newCachedAssets(serviceName string) prometheus.Gauge {
return prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: serviceName,
Name: "assets_cached",
Help: "A gauge displaying the current number of cached assets.",
},
)
}
func (s *sinkImpl) IncreaseCachedAssets() {
s.cachedAssets.Inc()
}
func (s *sinkImpl) DecreaseCachedAssets() {
s.cachedAssets.Dec()
}
func newSkippedAssets(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "assets_skipped",
Help: "A counter displaying the total number of all skipped assets.",
},
)
}
func (s *sinkImpl) IncreaseSkippedAssets() {
s.skippedAssets.Inc()
}
func newTotalAssets(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "assets_total",
Help: "A counter displaying the total number of all processed assets.",
},
)
}
func (s *sinkImpl) IncreaseTotalAssets() {
s.totalAssets.Inc()
}
func newAssetSize(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "asset_size_bytes",
Help: "A histogram displaying the size of each asset in bytes.",
Buckets: common.DefaultSizeBuckets,
},
)
}
func (s *sinkImpl) RecordAssetSize(size float64) {
s.assetSize.Observe(size)
}
func newProcessAssetDuration(serviceName string) prometheus.Histogram {
return prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "asset_process_duration_seconds",
Help: "A histogram displaying the duration of processing for each asset in seconds.",
Buckets: common.DefaultDurationBuckets,
},
)
}
func (s *sinkImpl) RecordProcessAssetDuration(durMillis float64) {
s.processAssetDuration.Observe(durMillis / 1000.0)
}
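One detail worth calling out: RecordWrittenBytes updates both the per-write histogram and the running written_bytes_total counter, and silently drops zero-sized writes. A tiny sketch (the "dom" file-type label is illustrative):

package main

import "openreplay/backend/pkg/metrics/sink"

func main() {
	m := sink.New("sink")
	m.RecordWrittenBytes(0, "dom")    // no-op: zero sizes are skipped early
	m.RecordWrittenBytes(4096, "dom") // observes 4096 and adds it to the total
}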

View file

@@ -0,0 +1,219 @@
package storage
import (
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
type Storage interface {
RecordSessionSize(fileSize float64, fileType string)
IncreaseStorageTotalSessions()
RecordSkippedSessionSize(fileSize float64, fileType string)
IncreaseStorageTotalSkippedSessions()
RecordSessionReadDuration(durMillis float64, fileType string)
RecordSessionSortDuration(durMillis float64, fileType string)
RecordSessionEncryptionDuration(durMillis float64, fileType string)
RecordSessionCompressDuration(durMillis float64, fileType string)
RecordSessionUploadDuration(durMillis float64, fileType string)
RecordSessionCompressionRatio(ratio float64, fileType string)
List() []prometheus.Collector
}
type storageImpl struct {
sessionSize *prometheus.HistogramVec
totalSessions prometheus.Counter
skippedSessionSize *prometheus.HistogramVec
totalSkippedSessions prometheus.Counter
sessionReadDuration *prometheus.HistogramVec
sessionSortDuration *prometheus.HistogramVec
sessionEncryptionDuration *prometheus.HistogramVec
sessionCompressDuration *prometheus.HistogramVec
sessionUploadDuration *prometheus.HistogramVec
sessionCompressionRatio *prometheus.HistogramVec
}
func New(serviceName string) Storage {
return &storageImpl{
sessionSize: newSessionSize(serviceName),
totalSessions: newTotalSessions(serviceName),
skippedSessionSize: newSkippedSessionSize(serviceName),
totalSkippedSessions: newTotalSkippedSessions(serviceName),
sessionReadDuration: newSessionReadDuration(serviceName),
sessionSortDuration: newSessionSortDuration(serviceName),
sessionEncryptionDuration: newSessionEncryptionDuration(serviceName),
sessionCompressDuration: newSessionCompressDuration(serviceName),
sessionUploadDuration: newSessionUploadDuration(serviceName),
sessionCompressionRatio: newSessionCompressionRatio(serviceName),
}
}
func (s *storageImpl) List() []prometheus.Collector {
return []prometheus.Collector{
s.sessionSize,
s.totalSessions,
s.skippedSessionSize,
s.totalSkippedSessions,
s.sessionReadDuration,
s.sessionSortDuration,
s.sessionEncryptionDuration,
s.sessionCompressDuration,
s.sessionUploadDuration,
s.sessionCompressionRatio,
}
}
func newSessionSize(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "session_size_bytes",
Help: "A histogram displaying the size of each session file in bytes prior to any manipulation.",
Buckets: common.DefaultSizeBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSessionSize(fileSize float64, fileType string) {
s.sessionSize.WithLabelValues(fileType).Observe(fileSize)
}
func newTotalSessions(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "sessions_total",
Help: "A counter displaying the total number of all processed sessions.",
},
)
}
func (s *storageImpl) IncreaseStorageTotalSessions() {
s.totalSessions.Inc()
}
func newSkippedSessionSize(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "skipped_session_size_bytes",
Help: "A histogram displaying the size of each skipped session file in bytes.",
Buckets: common.DefaultSizeBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSkippedSessionSize(fileSize float64, fileType string) {
s.skippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
}
func newTotalSkippedSessions(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "sessions_skipped_total",
Help: "A counter displaying the total number of all skipped sessions because of the size limits.",
},
)
}
func (s *storageImpl) IncreaseStorageTotalSkippedSessions() {
s.totalSkippedSessions.Inc()
}
func newSessionReadDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "read_duration_seconds",
Help: "A histogram displaying the duration of reading for each session in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSessionReadDuration(durMillis float64, fileType string) {
s.sessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}
func newSessionSortDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "sort_duration_seconds",
Help: "A histogram displaying the duration of sorting for each session in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSessionSortDuration(durMillis float64, fileType string) {
s.sessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}
func newSessionEncryptionDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "encryption_duration_seconds",
Help: "A histogram displaying the duration of encoding for each session in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSessionEncryptionDuration(durMillis float64, fileType string) {
s.sessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}
func newSessionCompressDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "compress_duration_seconds",
Help: "A histogram displaying the duration of compressing for each session in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSessionCompressDuration(durMillis float64, fileType string) {
s.sessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}
func newSessionUploadDuration(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "upload_duration_seconds",
Help: "A histogram displaying the duration of uploading to s3 for each session in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSessionUploadDuration(durMillis float64, fileType string) {
s.sessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}
func newSessionCompressionRatio(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "compression_ratio",
Help: "A histogram displaying the compression ratio of mob files for each session.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"file_type"},
)
}
func (s *storageImpl) RecordSessionCompressionRatio(ratio float64, fileType string) {
s.sessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
}
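Note the unit convention in this package: every Record*Duration helper takes milliseconds and converts to seconds before observing. A caller measures like this (a sketch; the "dom" label is illustrative):

package main

import (
	"time"

	"openreplay/backend/pkg/metrics/storage"
)

func main() {
	m := storage.New("storage")

	start := time.Now()
	// ... read and parse the session file here ...
	m.RecordSessionReadDuration(float64(time.Since(start).Milliseconds()), "dom")
	m.IncreaseStorageTotalSessions()
}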

View file

@@ -0,0 +1,84 @@
package web
import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
type Web interface {
RecordRequestSize(size float64, url string, code int)
RecordRequestDuration(durMillis float64, url string, code int)
IncreaseTotalRequests()
List() []prometheus.Collector
}
type webImpl struct {
httpRequestSize *prometheus.HistogramVec
httpRequestDuration *prometheus.HistogramVec
httpTotalRequests prometheus.Counter
}
func New(serviceName string) Web {
return &webImpl{
httpRequestSize: newRequestSizeMetric(serviceName),
httpRequestDuration: newRequestDurationMetric(serviceName),
httpTotalRequests: newTotalRequestsMetric(serviceName),
}
}
func (w *webImpl) List() []prometheus.Collector {
return []prometheus.Collector{
w.httpRequestSize,
w.httpRequestDuration,
w.httpTotalRequests,
}
}
func newRequestSizeMetric(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "web_request_size_bytes",
Help: "A histogram displaying the size of each HTTP request in bytes.",
Buckets: common.DefaultSizeBuckets,
},
[]string{"url", "response_code"},
)
}
func (w *webImpl) RecordRequestSize(size float64, url string, code int) {
w.httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
}
func newRequestDurationMetric(serviceName string) *prometheus.HistogramVec {
return prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: serviceName,
Name: "web_request_duration_seconds",
Help: "A histogram displaying the duration of each HTTP request in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"url", "response_code"},
)
}
func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {
w.httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
}
func newTotalRequestsMetric(serviceName string) prometheus.Counter {
return prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: serviceName,
Name: "web_requests_total",
Help: "A counter displaying the number all HTTP requests.",
},
)
}
func (w *webImpl) IncreaseTotalRequests() {
w.httpTotalRequests.Inc()
}

View file

@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"net/http"
+	"openreplay/backend/pkg/metrics/database"

 	"openreplay/backend/pkg/db/postgres"
 	db "openreplay/backend/pkg/db/postgres/pool"
@@ -18,13 +19,14 @@ type Tracer interface {
 }

 type tracerImpl struct {
 	log     logger.Logger
 	conn    db.Pool
 	traces  postgres.Bulk
 	saver   pool.WorkerPool
+	metrics database.Database
 }

-func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
+func NewTracer(log logger.Logger, conn db.Pool, metrics database.Database) (Tracer, error) {
 	switch {
 	case log == nil:
 		return nil, errors.New("logger is required")
@@ -32,8 +34,9 @@ func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
 		return nil, errors.New("connection is required")
 	}
 	tracer := &tracerImpl{
 		log:     log,
 		conn:    conn,
+		metrics: metrics,
 	}
 	if err := tracer.initBulk(); err != nil {
 		return nil, err
@@ -43,7 +46,7 @@ func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
 }

 func (t *tracerImpl) initBulk() (err error) {
-	t.traces, err = postgres.NewBulk(t.conn,
+	t.traces, err = postgres.NewBulk(t.conn, t.metrics,
 		"traces",
 		"(user_id, tenant_id, auth, action, method, path_format, endpoint, payload, parameters, status)",
 		"($%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d)",

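After this change, callers construct the tracer with the database metrics; a minimal wiring sketch under assumed names (the service label "chalice" and the helper itself are illustrative, not taken from this diff):
// newTracer is a hypothetical wiring helper for the new signature.
func newTracer(ctx context.Context, log logger.Logger, conn db.Pool) Tracer {
	dbMetrics := database.New("chalice") // service label is an assumption
	tracer, err := NewTracer(log, conn, dbMetrics)
	if err != nil {
		log.Fatal(ctx, "can't init tracer: %s", err)
	}
	return tracer
}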
View file

@ -10,8 +10,15 @@ import (
	"openreplay/backend/pkg/metrics/database"
)
+var ErrDisabledCache = errors.New("cache is disabled")
type cacheImpl struct {
	db *redis.Client
+	metrics database.Database
+}
+func NewCache(db *redis.Client, metrics database.Database) Cache {
+	return &cacheImpl{db: db, metrics: metrics}
}
func (c *cacheImpl) SetCache(sessID uint64, data map[string]string) error {
@ -32,8 +39,8 @@ func (c *cacheImpl) SetCache(sessID uint64, data map[string]string) error {
	if _, err = c.db.Redis.Set(fmt.Sprintf("session:cache:id:%d", sessID), sessionBytes, time.Minute*120).Result(); err != nil {
		return err
	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "setCache", "session")
-	database.IncreaseRedisRequests("setCache", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "setCache", "session")
+	c.metrics.IncreaseRedisRequests("setCache", "sessions")
	return nil
}
@ -53,8 +60,8 @@ func (c *cacheImpl) GetCache(sessID uint64) (map[string]string, error) {
	if err = json.Unmarshal([]byte(result), &session); err != nil {
		return nil, err
	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "getCache", "session")
-	database.IncreaseRedisRequests("getCache", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "getCache", "session")
+	c.metrics.IncreaseRedisRequests("getCache", "sessions")
	return session, nil
}
@ -76,8 +83,8 @@ func (c *cacheImpl) Set(session *Session) error {
	if _, err = c.db.Redis.Set(fmt.Sprintf("session:id:%d", session.SessionID), sessionBytes, time.Minute*60).Result(); err != nil {
		return err
	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "session")
-	database.IncreaseRedisRequests("set", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "session")
+	c.metrics.IncreaseRedisRequests("set", "sessions")
	return nil
}
@ -97,13 +104,7 @@ func (c *cacheImpl) Get(sessionID uint64) (*Session, error) {
	if err = json.Unmarshal([]byte(result), session); err != nil {
		return nil, err
	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "session")
-	database.IncreaseRedisRequests("get", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "session")
+	c.metrics.IncreaseRedisRequests("get", "sessions")
	return session, nil
}
-var ErrDisabledCache = errors.New("cache is disabled")
-func NewCache(db *redis.Client) Cache {
-	return &cacheImpl{db: db}
-}
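With the constructor relocated next to the struct, NewCache now takes the metrics dependency up front. A hypothetical guard showing how ErrDisabledCache might be used when Redis is not configured (this usage is an assumption, not shown in the diff):
// newSessionCache is an illustrative wrapper, not part of this commit.
func newSessionCache(client *redis.Client, m database.Database) (Cache, error) {
	if client == nil {
		return nil, ErrDisabledCache
	}
	return NewCache(client, m), nil
}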