Observability upgrade (#3146)
* feat(metrics): grand update
* feat(metrics): fixed missing part in ee tracer
* feat(assets): added missing arg
* feat(metrics): fixed naming problems
This commit is contained in:
parent
fe1130397c
commit
3b3e95a413
62 changed files with 1901 additions and 1619 deletions
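Reviewer note: the whole commit follows one pattern — package-level Prometheus helper functions (e.g. `database.RecordRequestDuration(...)`) are replaced by small metrics interfaces constructed per service and injected through constructors. A minimal sketch of the shape, with simplified names and signatures (this is not the actual OpenReplay API, just an illustration of the refactor):

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

// Database is a trimmed-down stand-in for the injected metrics
// interface this commit introduces (the real one lives in
// openreplay/backend/pkg/metrics/database and has many more methods).
type Database interface {
	IncreaseTotalRequests(method, table string)
	List() []prometheus.Collector
}

type databaseImpl struct {
	totalRequests *prometheus.CounterVec
}

// New labels every metric with the owning service, which is why the
// cmd/*/main.go files now call database.New("analytics"), New("db"), etc.
func New(serviceName string) Database {
	return &databaseImpl{
		totalRequests: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Namespace:   "database",
				Name:        "requests_total",
				Help:        "Total number of DB requests.",
				ConstLabels: prometheus.Labels{"service": serviceName},
			},
			[]string{"method", "table"},
		),
	}
}

func (d *databaseImpl) IncreaseTotalRequests(method, table string) {
	d.totalRequests.WithLabelValues(method, table).Inc()
}

func (d *databaseImpl) List() []prometheus.Collector {
	return []prometheus.Collector{d.totalRequests}
}

func main() {
	dbMetrics := New("example")
	reg := prometheus.NewRegistry()
	reg.MustRegister(dbMetrics.List()...)
	dbMetrics.IncreaseTotalRequests("select", "sessions")
	log.Println("metrics registered")
}
```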
@@ -8,8 +8,7 @@ import (
     "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/metrics"
-    analyticsMetrics "openreplay/backend/pkg/metrics/analytics"
-    databaseMetrics "openreplay/backend/pkg/metrics/database"
+    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/server"
     "openreplay/backend/pkg/server/api"
@@ -19,16 +18,18 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := analyticsConfig.New(log)
+    // Observability
     webMetrics := web.New("analytics")
-    metrics.New(log, append(webMetrics.List(), append(analyticsMetrics.List(), databaseMetrics.List()...)...))
+    dbMetrics := database.New("analytics")
+    metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))

-    pgConn, err := pool.New(cfg.Postgres.String())
+    pgConn, err := pool.New(dbMetrics, cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

-    builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, pgConn)
+    builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
     if err != nil {
         log.Fatal(ctx, "can't init services: %s", err)
     }

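Each service now composes its collector lists with nested `append` calls before handing them to `metrics.New`. A hedged sketch of what that composition amounts to — the flattening helper and the registry/endpoint wiring are assumptions, not the actual `openreplay/backend/pkg/metrics` implementation:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// collect flattens several collector lists; the diff does the same
// inline, e.g. metrics.New(log, append(webMetrics.List(), dbMetrics.List()...)).
func collect(lists ...[]prometheus.Collector) []prometheus.Collector {
	var all []prometheus.Collector
	for _, l := range lists {
		all = append(all, l...)
	}
	return all
}

func main() {
	webList := []prometheus.Collector{prometheus.NewCounter(
		prometheus.CounterOpts{Name: "web_requests_total", Help: "Total web requests."},
	)}
	dbList := []prometheus.Collector{prometheus.NewCounter(
		prometheus.CounterOpts{Name: "db_requests_total", Help: "Total DB requests."},
	)}

	// A plausible reading of what metrics.New does internally:
	// register everything and expose /metrics (assumption).
	reg := prometheus.NewRegistry()
	reg.MustRegister(collect(webList, dbList)...)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}
```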
@@ -22,13 +22,15 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    metrics.New(log, assetsMetrics.List())
+    // Observability
+    assetMetrics := assetsMetrics.New("assets")
+    metrics.New(log, assetMetrics.List())

     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         log.Fatal(ctx, "can't init object storage: %s", err)
     }
-    cacher, err := cacher.NewCacher(cfg, objStore)
+    cacher, err := cacher.NewCacher(cfg, objStore, assetMetrics)
     if err != nil {
         log.Fatal(ctx, "can't init cacher: %s", err)
     }
@@ -37,7 +39,7 @@ func main() {
     switch m := msg.(type) {
     case *messages.AssetCache:
         cacher.CacheURL(m.SessionID(), m.URL)
-        assetsMetrics.IncreaseProcessesSessions()
+        assetMetrics.IncreaseProcessesSessions()
     case *messages.JSException:
         sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
         if err != nil {

@@ -22,6 +22,7 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
+    // Observability
     canvasMetrics := canvasesMetrics.New("canvases")
     metrics.New(log, canvasMetrics.List())

@@ -14,7 +14,7 @@ import (
     "openreplay/backend/pkg/memory"
     "openreplay/backend/pkg/messages"
     "openreplay/backend/pkg/metrics"
-    databaseMetrics "openreplay/backend/pkg/metrics/database"
+    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/projects"
     "openreplay/backend/pkg/queue"
     "openreplay/backend/pkg/sessions"
@@ -26,22 +26,24 @@ func main() {
     ctx := context.Background()
     log := logger.New()
    cfg := config.New(log)
-    metrics.New(log, databaseMetrics.List())
+    // Observability
+    dbMetric := database.New("db")
+    metrics.New(log, dbMetric.List())

-    pgConn, err := pool.New(cfg.Postgres.String())
+    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

-    chConn := clickhouse.NewConnector(cfg.Clickhouse)
+    chConn := clickhouse.NewConnector(cfg.Clickhouse, dbMetric)
     if err := chConn.Prepare(); err != nil {
         log.Fatal(ctx, "can't prepare clickhouse: %s", err)
     }
     defer chConn.Stop()

     // Init db proxy module (postgres + clickhouse + batches)
-    dbProxy := postgres.NewConn(log, pgConn, chConn)
+    dbProxy := postgres.NewConn(log, pgConn, chConn, dbMetric)
     defer dbProxy.Close()

     // Init redis connection
@@ -51,8 +53,8 @@ func main() {
     }
     defer redisClient.Close()

-    projManager := projects.New(log, pgConn, redisClient)
-    sessManager := sessions.New(log, pgConn, projManager, redisClient)
+    projManager := projects.New(log, pgConn, redisClient, dbMetric)
+    sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)
     tagsManager := tags.New(log, pgConn)

     // Init data saver

@@ -19,7 +19,7 @@ import (
     "openreplay/backend/pkg/memory"
     "openreplay/backend/pkg/messages"
     "openreplay/backend/pkg/metrics"
-    databaseMetrics "openreplay/backend/pkg/metrics/database"
+    "openreplay/backend/pkg/metrics/database"
     enderMetrics "openreplay/backend/pkg/metrics/ender"
     "openreplay/backend/pkg/projects"
     "openreplay/backend/pkg/queue"
@@ -31,9 +31,12 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := ender.New(log)
-    metrics.New(log, append(enderMetrics.List(), databaseMetrics.List()...))
+    // Observability
+    dbMetric := database.New("ender")
+    enderMetric := enderMetrics.New("ender")
+    metrics.New(log, append(enderMetric.List(), dbMetric.List()...))

-    pgConn, err := pool.New(cfg.Postgres.String())
+    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
@@ -45,10 +48,10 @@ func main() {
     }
     defer redisClient.Close()

-    projManager := projects.New(log, pgConn, redisClient)
-    sessManager := sessions.New(log, pgConn, projManager, redisClient)
+    projManager := projects.New(log, pgConn, redisClient, dbMetric)
+    sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)

-    sessionEndGenerator, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
+    sessionEndGenerator, err := sessionender.New(enderMetric, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
     if err != nil {
         log.Fatal(ctx, "can't init ender service: %s", err)
     }

@@ -23,7 +23,9 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    metrics.New(log, heuristicsMetrics.List())
+    // Observability
+    heuristicsMetric := heuristicsMetrics.New("heuristics")
+    metrics.New(log, heuristicsMetric.List())

     // HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
     handlersFabric := func() []handlers.MessageProcessor {
@@ -62,7 +64,7 @@ func main() {
     }

     // Run service and wait for TERM signal
-    service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager)
+    service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager, heuristicsMetric)
     log.Info(ctx, "Heuristics service started")
     terminator.Wait(log, service)
 }

@@ -9,7 +9,7 @@ import (
     "openreplay/backend/pkg/db/redis"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/metrics"
-    databaseMetrics "openreplay/backend/pkg/metrics/database"
+    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/queue"
     "openreplay/backend/pkg/server"
@@ -20,13 +20,15 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := http.New(log)
+    // Observability
     webMetrics := web.New("http")
-    metrics.New(log, append(webMetrics.List(), databaseMetrics.List()...))
+    dbMetric := database.New("http")
+    metrics.New(log, append(webMetrics.List(), dbMetric.List()...))

     producer := queue.NewProducer(cfg.MessageSizeLimit, true)
     defer producer.Close(15000)

-    pgConn, err := pool.New(cfg.Postgres.String())
+    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
@@ -38,7 +40,7 @@ func main() {
     }
     defer redisClient.Close()

-    builder, err := services.New(log, cfg, webMetrics, producer, pgConn, redisClient)
+    builder, err := services.New(log, cfg, webMetrics, dbMetric, producer, pgConn, redisClient)
     if err != nil {
         log.Fatal(ctx, "failed while creating services: %s", err)
     }

@@ -23,6 +23,7 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
+    // Observability
     imageMetrics := imagesMetrics.New("images")
     metrics.New(log, imageMetrics.List())

@@ -18,16 +18,18 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
+    // Observability
     webMetrics := web.New("integrations")
-    metrics.New(log, append(webMetrics.List(), database.List()...))
+    dbMetric := database.New("integrations")
+    metrics.New(log, append(webMetrics.List(), dbMetric.List()...))

-    pgConn, err := pool.New(cfg.Postgres.String())
+    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

-    builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, pgConn)
+    builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, dbMetric, pgConn)
     if err != nil {
         log.Fatal(ctx, "can't init services: %s", err)
     }

@@ -9,14 +9,14 @@ import (
     "syscall"
     "time"

-    "openreplay/backend/internal/config/sink"
+    config "openreplay/backend/internal/config/sink"
     "openreplay/backend/internal/sink/assetscache"
     "openreplay/backend/internal/sink/sessionwriter"
     "openreplay/backend/internal/storage"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/messages"
     "openreplay/backend/pkg/metrics"
-    sinkMetrics "openreplay/backend/pkg/metrics/sink"
+    "openreplay/backend/pkg/metrics/sink"
     "openreplay/backend/pkg/queue"
     "openreplay/backend/pkg/url/assets"
 )
@@ -24,7 +24,9 @@ import (
 func main() {
     ctx := context.Background()
     log := logger.New()
-    cfg := sink.New(log)
+    cfg := config.New(log)
+    // Observability
+    sinkMetrics := sink.New("sink")
     metrics.New(log, sinkMetrics.List())

     if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
@@ -39,7 +41,7 @@ func main() {
     if err != nil {
         log.Fatal(ctx, "can't init rewriter: %s", err)
     }
-    assetMessageHandler := assetscache.New(log, cfg, rewriter, producer)
+    assetMessageHandler := assetscache.New(log, cfg, rewriter, producer, sinkMetrics)
     counter := storage.NewLogCounter()

     var (
@@ -191,7 +193,7 @@ func main() {
             cfg.TopicRawWeb,
             cfg.TopicRawMobile,
         },
-        messages.NewSinkMessageIterator(log, msgHandler, nil, false),
+        messages.NewSinkMessageIterator(log, msgHandler, nil, false, sinkMetrics),
         false,
         cfg.MessageSizeLimit,
     )

@@ -19,17 +19,20 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := spotConfig.New(log)
+    // Observability
     webMetrics := web.New("spot")
-    metrics.New(log, append(webMetrics.List(), append(spotMetrics.List(), databaseMetrics.List()...)...))
+    spotMetric := spotMetrics.New("spot")
+    dbMetric := databaseMetrics.New("spot")
+    metrics.New(log, append(webMetrics.List(), append(spotMetric.List(), dbMetric.List()...)...))

-    pgConn, err := pool.New(cfg.Postgres.String())
+    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

     prefix := api.NoPrefix
-    builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, pgConn, prefix)
+    builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, spotMetric, dbMetric, pgConn, prefix)
     if err != nil {
         log.Fatal(ctx, "can't init services: %s", err)
     }

@@ -23,13 +23,15 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    metrics.New(log, storageMetrics.List())
+    // Observability
+    storageMetric := storageMetrics.New("storage")
+    metrics.New(log, storageMetric.List())

     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         log.Fatal(ctx, "can't init object storage: %s", err)
     }
-    srv, err := storage.New(cfg, log, objStore)
+    srv, err := storage.New(cfg, log, objStore, storageMetric)
     if err != nil {
         log.Fatal(ctx, "can't init storage service: %s", err)
     }

@@ -27,6 +27,7 @@ type cacher struct {
     objStorage     objectstorage.ObjectStorage // AWS Docs: "These clients are safe to use concurrently."
     httpClient     *http.Client                // Docs: "Clients are safe for concurrent use by multiple goroutines."
     rewriter       *assets.Rewriter            // Read only
+    metrics        metrics.Assets
     Errors         chan error
     sizeLimit      int
     requestHeaders map[string]string
@@ -37,7 +38,7 @@ func (c *cacher) CanCache() bool {
     return c.workers.CanAddTask()
 }

-func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {
+func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage, metrics metrics.Assets) (*cacher, error) {
     switch {
     case cfg == nil:
         return nil, errors.New("config is nil")
@@ -93,6 +94,7 @@ func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher,
         Errors:         make(chan error),
         sizeLimit:      cfg.AssetsSizeLimit,
         requestHeaders: cfg.AssetsRequestHeaders,
+        metrics:        metrics,
     }
     c.workers = NewPool(64, c.CacheFile)
     return c, nil
@@ -115,7 +117,7 @@ func (c *cacher) cacheURL(t *Task) {
         c.Errors <- errors.Wrap(err, t.urlContext)
         return
     }
-    metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
+    c.metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
     defer res.Body.Close()
     if res.StatusCode >= 400 {
         printErr := true
@@ -162,12 +164,12 @@ func (c *cacher) cacheURL(t *Task) {
     start = time.Now()
     err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression)
     if err != nil {
-        metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
+        c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
         c.Errors <- errors.Wrap(err, t.urlContext)
         return
     }
-    metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
-    metrics.IncreaseSavedSessions()
+    c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
+    c.metrics.IncreaseSavedSessions()

     if isCSS {
         if t.depth > 0 {

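The cacher now records download and upload timings on its injected `Assets` metrics, computing durations as `time.Now().Sub(start)` (equivalent to the more idiomatic `time.Since(start)`). A hedged sketch of the measurement pattern around an HTTP fetch — the interface here is trimmed to one method and the logging impl is purely illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// Assets mirrors just the download-timing hook the cacher calls; the
// real interface (pkg/metrics/assets) has more methods.
type Assets interface {
	RecordDownloadDuration(durMillis float64, code int)
}

type logAssets struct{}

func (logAssets) RecordDownloadDuration(durMillis float64, code int) {
	fmt.Printf("download took %.0fms (status %d)\n", durMillis, code)
}

// fetch shows the pattern used in cacher.cacheURL: start a timer,
// perform the request, then record elapsed milliseconds together
// with the response code.
func fetch(m Assets, url string) error {
	start := time.Now()
	res, err := http.Get(url)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	// time.Since(start) == time.Now().Sub(start) as used in the diff.
	m.RecordDownloadDuration(float64(time.Since(start).Milliseconds()), res.StatusCode)
	return nil
}

func main() {
	_ = fetch(logAssets{}, "https://example.com")
}
```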
@@ -11,7 +11,7 @@ import (
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/memory"
     "openreplay/backend/pkg/messages"
-    metrics "openreplay/backend/pkg/metrics/heuristics"
+    heuristicMetrics "openreplay/backend/pkg/metrics/heuristics"
     "openreplay/backend/pkg/queue/types"
 )

@@ -23,11 +23,12 @@ type heuristicsImpl struct {
     consumer types.Consumer
     events   builders.EventBuilder
     mm       memory.Manager
+    metrics  heuristicMetrics.Heuristics
     done     chan struct{}
     finished chan struct{}
 }

-func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager) service.Interface {
+func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager, metrics heuristicMetrics.Heuristics) service.Interface {
     s := &heuristicsImpl{
         log: log,
         ctx: context.Background(),
@@ -36,6 +37,7 @@ func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Co
         consumer: c,
         events:   e,
         mm:       mm,
+        metrics:  metrics,
         done:     make(chan struct{}),
         finished: make(chan struct{}),
     }
@@ -51,7 +53,7 @@ func (h *heuristicsImpl) run() {
             if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
                 h.log.Error(h.ctx, "can't send new event to queue: %s", err)
             } else {
-                metrics.IncreaseTotalEvents(messageTypeName(evt))
+                h.metrics.IncreaseTotalEvents(messageTypeName(evt))
             }
         case <-tick:
             h.producer.Flush(h.cfg.ProducerTimeout)

@@ -12,6 +12,7 @@ import (
     featureflagsAPI "openreplay/backend/pkg/featureflags/api"
     "openreplay/backend/pkg/flakeid"
     "openreplay/backend/pkg/logger"
+    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/objectstorage/store"
     "openreplay/backend/pkg/projects"
@@ -36,8 +37,8 @@ type ServicesBuilder struct {
     UxTestsAPI api.Handlers
 }

-func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
-    projs := projects.New(log, pgconn, redis)
+func New(log logger.Logger, cfg *http.Config, webMetrics web.Web, dbMetrics database.Database, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
+    projs := projects.New(log, pgconn, redis, dbMetrics)
     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         return nil, err
@@ -53,11 +54,11 @@ func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Pr
     tokenizer := token.NewTokenizer(cfg.TokenSecret)
     conditions := conditions.New(pgconn)
     flaker := flakeid.NewFlaker(cfg.WorkerID)
-    sessions := sessions.New(log, pgconn, projs, redis)
+    sessions := sessions.New(log, pgconn, projs, redis, dbMetrics)
     featureFlags := featureflags.New(pgconn)
     tags := tags.New(log, pgconn)
     uxTesting := uxtesting.New(pgconn)
-    responser := api.NewResponser(metrics)
+    responser := api.NewResponser(webMetrics)
     builder := &ServicesBuilder{}
     if builder.WebAPI, err = websessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil {
         return nil, err

@@ -21,6 +21,7 @@ type session struct {

 // SessionEnder updates timestamp of last message for each session
 type SessionEnder struct {
+    metrics  ender.Ender
     timeout  int64
     sessions map[uint64]*session // map[sessionID]session
     timeCtrl *timeController
@@ -28,8 +29,9 @@ type SessionEnder struct {
     enabled bool
 }

-func New(timeout int64, parts int) (*SessionEnder, error) {
+func New(metrics ender.Ender, timeout int64, parts int) (*SessionEnder, error) {
     return &SessionEnder{
+        metrics:  metrics,
         timeout:  timeout,
         sessions: make(map[uint64]*session),
         timeCtrl: NewTimeController(parts),
@@ -56,7 +58,7 @@ func (se *SessionEnder) ActivePartitions(parts []uint64) {
     for sessID, _ := range se.sessions {
         if !activeParts[sessID%se.parts] {
             delete(se.sessions, sessID)
-            ender.DecreaseActiveSessions()
+            se.metrics.DecreaseActiveSessions()
             removedSessions++
         } else {
             activeSessions++
@@ -89,8 +91,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
             isEnded:  false,
             isMobile: messages.IsMobileType(msg.TypeID()),
         }
-        ender.IncreaseActiveSessions()
-        ender.IncreaseTotalSessions()
+        se.metrics.IncreaseActiveSessions()
+        se.metrics.IncreaseTotalSessions()
         return
     }
     // Keep the highest user's timestamp for correct session duration value
@@ -139,8 +141,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
             sess.isEnded = true
             if res, _ := handler(sessID, sess.lastUserTime); res {
                 delete(se.sessions, sessID)
-                ender.DecreaseActiveSessions()
-                ender.IncreaseClosedSessions()
+                se.metrics.DecreaseActiveSessions()
+                se.metrics.IncreaseClosedSessions()
                 removedSessions++
                 if endCase == 2 {
                     brokerTime[1]++

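The SessionEnder keeps an active-session gauge in sync with its map: increase on first sight, decrease on eviction or close, plus monotonic total/closed counters. A sketch of the gauge/counter semantics behind those four calls — metric names and the impl are assumptions, only the interface methods come from the diff:

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// Ender is a cut-down version of the interface the SessionEnder now
// receives (the real one is pkg/metrics/ender.Ender).
type Ender interface {
	IncreaseActiveSessions()
	DecreaseActiveSessions()
	IncreaseTotalSessions()
	IncreaseClosedSessions()
}

type enderImpl struct {
	active prometheus.Gauge   // goes up and down with the session map
	total  prometheus.Counter // monotonic: sessions ever seen
	closed prometheus.Counter // monotonic: sessions ended
}

func NewEnder() *enderImpl {
	return &enderImpl{
		active: prometheus.NewGauge(prometheus.GaugeOpts{Name: "ender_active_sessions", Help: "Sessions currently tracked."}),
		total:  prometheus.NewCounter(prometheus.CounterOpts{Name: "ender_total_sessions", Help: "Sessions ever seen."}),
		closed: prometheus.NewCounter(prometheus.CounterOpts{Name: "ender_closed_sessions", Help: "Sessions closed."}),
	}
}

func (e *enderImpl) IncreaseActiveSessions() { e.active.Inc() }
func (e *enderImpl) DecreaseActiveSessions() { e.active.Dec() }
func (e *enderImpl) IncreaseTotalSessions()  { e.total.Inc() }
func (e *enderImpl) IncreaseClosedSessions() { e.closed.Inc() }

func main() {
	var m Ender = NewEnder()
	m.IncreaseActiveSessions() // UpdateSession: a new session appears
	m.IncreaseTotalSessions()
	m.IncreaseClosedSessions() // HandleEndedSessions: the session ends
	m.DecreaseActiveSessions()
}
```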
@@ -12,7 +12,7 @@ import (
     "openreplay/backend/internal/config/sink"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/messages"
-    metrics "openreplay/backend/pkg/metrics/sink"
+    sinkMetrics "openreplay/backend/pkg/metrics/sink"
     "openreplay/backend/pkg/queue/types"
     "openreplay/backend/pkg/url/assets"
 )
@@ -30,9 +30,10 @@ type AssetsCache struct {
     producer  types.Producer
     cache     map[string]*CachedAsset
     blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
+    metrics   sinkMetrics.Sink
 }

-func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
+func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics sinkMetrics.Sink) *AssetsCache {
     assetsCache := &AssetsCache{
         log: log,
         cfg: cfg,
@@ -40,6 +41,7 @@ func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, produce
         producer:  producer,
         cache:     make(map[string]*CachedAsset, 64),
         blackList: make([]string, 0),
+        metrics:   metrics,
     }
     // Parse black list for cache layer
     if len(cfg.CacheBlackList) > 0 {
@@ -76,7 +78,7 @@ func (e *AssetsCache) clearCache() {
         if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
             deleted++
             delete(e.cache, id)
-            metrics.DecreaseCachedAssets()
+            e.metrics.DecreaseCachedAssets()
         }
     }
     e.log.Info(context.Background(), "cache cleaner: deleted %d/%d assets", deleted, cacheSize)
@@ -194,7 +196,7 @@ func parseHost(baseURL string) (string, error) {
 }

 func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
-    metrics.IncreaseTotalAssets()
+    e.metrics.IncreaseTotalAssets()
     // Try to find asset in cache
     h := md5.New()
     // Cut first part of url (scheme + host)
@@ -217,7 +219,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
     e.mutex.RUnlock()
     if ok {
         if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
-            metrics.IncreaseSkippedAssets()
+            e.metrics.IncreaseSkippedAssets()
             return cachedAsset.msg
         }
     }
@@ -229,8 +231,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
     start := time.Now()
     res := e.getRewrittenCSS(sessionID, baseURL, css)
     duration := time.Now().Sub(start).Milliseconds()
-    metrics.RecordAssetSize(float64(len(res)))
-    metrics.RecordProcessAssetDuration(float64(duration))
+    e.metrics.RecordAssetSize(float64(len(res)))
+    e.metrics.RecordProcessAssetDuration(float64(duration))
     // Save asset to cache if we spent more than threshold
     if duration > e.cfg.CacheThreshold {
         e.mutex.Lock()
@@ -239,7 +241,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
             ts:  time.Now(),
         }
         e.mutex.Unlock()
-        metrics.IncreaseCachedAssets()
+        e.metrics.IncreaseCachedAssets()
     }
     // Return rewritten asset
     return res

@@ -18,7 +18,7 @@ import (
     config "openreplay/backend/internal/config/storage"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/messages"
-    metrics "openreplay/backend/pkg/metrics/storage"
+    storageMetrics "openreplay/backend/pkg/metrics/storage"
     "openreplay/backend/pkg/objectstorage"
     "openreplay/backend/pkg/pool"
 )
@@ -77,9 +77,10 @@ type Storage struct {
     splitTime     uint64
     processorPool pool.WorkerPool
     uploaderPool  pool.WorkerPool
+    metrics       storageMetrics.Storage
 }

-func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*Storage, error) {
+func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, metrics storageMetrics.Storage) (*Storage, error) {
     switch {
     case cfg == nil:
         return nil, fmt.Errorf("config is empty")
@@ -92,6 +93,7 @@ func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectS
         objStorage: objStorage,
         startBytes: make([]byte, cfg.FileSplitSize),
         splitTime:  parseSplitTime(cfg.FileSplitTime),
+        metrics:    metrics,
     }
     s.processorPool = pool.NewPool(1, 1, s.doCompression)
     s.uploaderPool = pool.NewPool(1, 1, s.uploadSession)
@@ -141,7 +143,7 @@ func (s *Storage) Process(ctx context.Context, msg *messages.SessionEnd) (err er
     if err != nil {
         if strings.Contains(err.Error(), "big file") {
             s.log.Warn(ctx, "can't process session: %s", err)
-            metrics.IncreaseStorageTotalSkippedSessions()
+            s.metrics.IncreaseStorageTotalSkippedSessions()
             return nil
         }
         return err
@@ -159,8 +161,8 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
         return err
     }

-    metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
-    metrics.RecordSessionSize(float64(len(mob)), tp.String())
+    s.metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
+    s.metrics.RecordSessionSize(float64(len(mob)), tp.String())

     // Put opened session file into task struct
     task.SetMob(mob, index, tp)
@@ -174,7 +176,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
     // Check file size before download into memory
     info, err := os.Stat(filePath)
     if err == nil && info.Size() > s.cfg.MaxFileSize {
-        metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
+        s.metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
         return nil, -1, fmt.Errorf("big file, size: %d", info.Size())
     }
     // Read file into memory
@@ -190,7 +192,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
     if err != nil {
         return nil, -1, fmt.Errorf("can't sort session, err: %s", err)
     }
-    metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+    s.metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
     return mob, index, nil
 }

@@ -234,12 +236,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
     // Compression
     start := time.Now()
     data := s.compress(task.ctx, mob, task.compression)
-    metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+    s.metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())

     // Encryption
     start = time.Now()
     result := s.encryptSession(task.ctx, data.Bytes(), task.key)
-    metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+    s.metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())

     if tp == DOM {
         task.doms = bytes.NewBuffer(result)
@@ -296,8 +298,8 @@ func (s *Storage) packSession(task *Task, tp FileType) {
     wg.Wait()

     // Record metrics
-    metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
-    metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
+    s.metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
+    s.metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
 }

 func (s *Storage) encryptSession(ctx context.Context, data []byte, encryptionKey string) []byte {
@@ -382,7 +384,7 @@ func (s *Storage) uploadSession(payload interface{}) {
     go func() {
         if task.doms != nil {
             // Record compression ratio
-            metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
+            s.metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
             // Upload session to s3
             start := time.Now()
             if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -395,7 +397,7 @@ func (s *Storage) uploadSession(payload interface{}) {
     go func() {
         if task.dome != nil {
             // Record compression ratio
-            metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
+            s.metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
             // Upload session to s3
             start := time.Now()
             if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -408,7 +410,7 @@ func (s *Storage) uploadSession(payload interface{}) {
     go func() {
         if task.dev != nil {
             // Record compression ratio
-            metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
+            s.metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
             // Upload session to s3
             start := time.Now()
             if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -419,9 +421,9 @@ func (s *Storage) uploadSession(payload interface{}) {
         wg.Done()
     }()
     wg.Wait()
-    metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
-    metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
-    metrics.IncreaseStorageTotalSessions()
+    s.metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
+    s.metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
+    s.metrics.IncreaseStorageTotalSessions()
 }

 func (s *Storage) doCompression(payload interface{}) {

@@ -3,6 +3,7 @@ package analytics
 import (
     "github.com/go-playground/validator/v10"
     "openreplay/backend/pkg/analytics/charts"
+    "openreplay/backend/pkg/metrics/database"
     "time"

     "openreplay/backend/internal/config/analytics"
@@ -26,9 +27,9 @@ type ServicesBuilder struct {
     ChartsAPI api.Handlers
 }

-func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, pgconn pool.Pool) (*ServicesBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServicesBuilder, error) {
     responser := api.NewResponser(webMetrics)
-    audiTrail, err := tracer.NewTracer(log, pgconn)
+    audiTrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
     if err != nil {
         return nil, err
     }

@@ -18,13 +18,14 @@ type Bulk interface {
 }

 type bulkImpl struct {
-    conn   driver.Conn
-    table  string
-    query  string
-    values [][]interface{}
+    conn    driver.Conn
+    metrics database.Database
+    table   string
+    query   string
+    values  [][]interface{}
 }

-func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
+func NewBulk(conn driver.Conn, metrics database.Database, table, query string) (Bulk, error) {
     switch {
     case conn == nil:
         return nil, errors.New("clickhouse connection is empty")
@@ -34,10 +35,11 @@ func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
         return nil, errors.New("query is empty")
     }
     return &bulkImpl{
-        conn:   conn,
-        table:  table,
-        query:  query,
-        values: make([][]interface{}, 0),
+        conn:    conn,
+        metrics: metrics,
+        table:   table,
+        query:   query,
+        values:  make([][]interface{}, 0),
     }, nil
 }

@@ -60,8 +62,8 @@ func (b *bulkImpl) Send() error {
     }
     err = batch.Send()
     // Save bulk metrics
-    database.RecordBulkElements(float64(len(b.values)), "ch", b.table)
-    database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
+    b.metrics.RecordBulkElements(float64(len(b.values)), "ch", b.table)
+    b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
     // Prepare values slice for a new data
     b.values = make([][]interface{}, 0)
     return err

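Both the ClickHouse and Postgres bulks record the same two things per flush: the number of buffered elements and the insert latency, labeled by backend (`"ch"`/`"pg"`) and table. A hedged sketch of that flush-side pattern — the interface is trimmed to the two hooks `Send()` actually uses, and the print-based impl is illustrative only:

```go
package main

import (
	"fmt"
	"time"
)

// Database stands in for pkg/metrics/database.Database; only the two
// bulk hooks used by Send() are shown.
type Database interface {
	RecordBulkElements(count float64, db, table string)
	RecordBulkInsertDuration(durMillis float64, db, table string)
}

type printMetrics struct{}

func (printMetrics) RecordBulkElements(n float64, db, table string) {
	fmt.Printf("%s/%s: %v rows\n", db, table, n)
}

func (printMetrics) RecordBulkInsertDuration(ms float64, db, table string) {
	fmt.Printf("%s/%s: insert took %vms\n", db, table, ms)
}

// send mirrors bulkImpl.Send(): flush the buffered values, then
// report batch size and latency on the injected metrics.
func send(m Database, table string, values [][]interface{}) error {
	start := time.Now()
	// ... batch.Append / batch.Send against ClickHouse would happen here ...
	m.RecordBulkElements(float64(len(values)), "ch", table)
	m.RecordBulkInsertDuration(float64(time.Since(start).Milliseconds()), "ch", table)
	return nil
}

func main() {
	_ = send(printMetrics{}, "sessions", make([][]interface{}, 3))
}
```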
@@ -7,6 +7,7 @@ import (
     "fmt"
     "hash/fnv"
     "log"
+    "openreplay/backend/pkg/metrics/database"
     "strings"
     "time"

@@ -57,13 +58,14 @@ func NewTask() *task {

 type connectorImpl struct {
     conn       driver.Conn
+    metrics    database.Database
     batches    map[string]Bulk //driver.Batch
     workerTask chan *task
     done       chan struct{}
     finished   chan struct{}
 }

-func NewConnector(cfg common.Clickhouse) Connector {
+func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {
     conn, err := clickhouse.Open(&clickhouse.Options{
         Addr: []string{cfg.GetTrimmedURL()},
         Auth: clickhouse.Auth{
@@ -84,6 +86,7 @@ func NewConnector(cfg common.Clickhouse) Connector {

     c := &connectorImpl{
         conn:       conn,
+        metrics:    metrics,
         batches:    make(map[string]Bulk, 20),
         workerTask: make(chan *task, 1),
         done:       make(chan struct{}),
@@ -94,7 +97,7 @@ func NewConnector(cfg common.Clickhouse) Connector {
 }

 func (c *connectorImpl) newBatch(name, query string) error {
-    batch, err := NewBulk(c.conn, name, query)
+    batch, err := NewBulk(c.conn, c.metrics, name, query)
     if err != nil {
         return fmt.Errorf("can't create new batch: %s", err)
     }

@@ -52,6 +52,7 @@ func NewBatchesTask(size int) *batchesTask {
 type BatchSet struct {
     log        logger.Logger
     c          pool.Pool
+    metrics    database.Database
     ctx        context.Context
     batches    map[uint64]*SessionBatch
     workerTask chan *batchesTask
@@ -59,10 +60,11 @@ type BatchSet struct {
     finished   chan struct{}
 }

-func NewBatchSet(log logger.Logger, c pool.Pool) *BatchSet {
+func NewBatchSet(log logger.Logger, c pool.Pool, metrics database.Database) *BatchSet {
     bs := &BatchSet{
         log:        log,
         c:          c,
+        metrics:    metrics,
         ctx:        context.Background(),
         batches:    make(map[uint64]*SessionBatch),
         workerTask: make(chan *batchesTask, 1),
@@ -104,7 +106,7 @@ func (conn *BatchSet) Stop() {
 func (conn *BatchSet) sendBatches(t *batchesTask) {
     for _, batch := range t.batches {
         // Record batch size
-        database.RecordBatchElements(float64(batch.Len()))
+        conn.metrics.RecordBatchElements(float64(batch.Len()))

         start := time.Now()

@@ -120,7 +122,7 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
             }
         }
         br.Close() // returns err
-        database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
+        conn.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
     }
 }

@@ -24,6 +24,7 @@ type Bulk interface {

 type bulkImpl struct {
     conn     pool.Pool
+    metrics  database.Database
     table    string
     columns  string
     template string
@@ -75,12 +76,12 @@ func (b *bulkImpl) send() error {
         return fmt.Errorf("send bulk err: %s", err)
     }
     // Save bulk metrics
-    database.RecordBulkElements(float64(size), "pg", b.table)
-    database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
+    b.metrics.RecordBulkElements(float64(size), "pg", b.table)
+    b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
     return nil
 }

-func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
+func NewBulk(conn pool.Pool, metrics database.Database, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
     switch {
     case conn == nil:
         return nil, errors.New("db conn is empty")
@@ -97,6 +98,7 @@ func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit
     }
     return &bulkImpl{
         conn:     conn,
+        metrics:  metrics,
         table:    table,
         columns:  columns,
         template: template,

@@ -2,6 +2,7 @@ package postgres

 import (
     "context"
+    "openreplay/backend/pkg/metrics/database"

     "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/logger"
@@ -21,6 +22,7 @@ type BulkSet struct {
     log           logger.Logger
     c             pool.Pool
     ctx           context.Context
+    metrics       database.Database
     autocompletes Bulk
     requests      Bulk
     customEvents  Bulk
@@ -43,10 +45,11 @@ type BulkSet struct {
     finished chan struct{}
 }

-func NewBulkSet(log logger.Logger, c pool.Pool) *BulkSet {
+func NewBulkSet(log logger.Logger, c pool.Pool, metrics database.Database) *BulkSet {
     bs := &BulkSet{
         log:        log,
         c:          c,
+        metrics:    metrics,
         ctx:        context.Background(),
         workerTask: make(chan *bulksTask, 1),
         done:       make(chan struct{}),
@@ -100,7 +103,7 @@ func (conn *BulkSet) Get(name string) Bulk {

 func (conn *BulkSet) initBulks() {
     var err error
-    conn.autocompletes, err = NewBulk(conn.c,
+    conn.autocompletes, err = NewBulk(conn.c, conn.metrics,
         "autocomplete",
         "(value, type, project_id)",
         "($%d, $%d, $%d)",
@@ -108,7 +111,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create autocomplete bulk: %s", err)
     }
-    conn.requests, err = NewBulk(conn.c,
+    conn.requests, err = NewBulk(conn.c, conn.metrics,
         "events_common.requests",
         "(session_id, timestamp, seq_index, url, duration, success)",
         "($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
@@ -116,7 +119,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create requests bulk: %s", err)
     }
-    conn.customEvents, err = NewBulk(conn.c,
+    conn.customEvents, err = NewBulk(conn.c, conn.metrics,
         "events_common.customs",
         "(session_id, timestamp, seq_index, name, payload)",
         "($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
@@ -124,7 +127,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create customEvents bulk: %s", err)
     }
-    conn.webPageEvents, err = NewBulk(conn.c,
+    conn.webPageEvents, err = NewBulk(conn.c, conn.metrics,
         "events.pages",
         "(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
             "load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
@@ -136,7 +139,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webPageEvents bulk: %s", err)
     }
-    conn.webInputDurations, err = NewBulk(conn.c,
+    conn.webInputDurations, err = NewBulk(conn.c, conn.metrics,
         "events.inputs",
         "(session_id, message_id, timestamp, label, hesitation, duration)",
         "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)",
@@ -144,7 +147,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webInputDurations bulk: %s", err)
     }
-    conn.webGraphQL, err = NewBulk(conn.c,
+    conn.webGraphQL, err = NewBulk(conn.c, conn.metrics,
         "events.graphql",
         "(session_id, timestamp, message_id, name, request_body, response_body)",
         "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -152,7 +155,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webGraphQL bulk: %s", err)
     }
-    conn.webErrors, err = NewBulk(conn.c,
+    conn.webErrors, err = NewBulk(conn.c, conn.metrics,
         "errors",
         "(error_id, project_id, source, name, message, payload)",
         "($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
@@ -160,7 +163,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webErrors bulk: %s", err)
     }
-    conn.webErrorEvents, err = NewBulk(conn.c,
+    conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics,
         "events.errors",
         "(session_id, message_id, timestamp, error_id)",
         "($%d, $%d, $%d, $%d)",
@@ -168,7 +171,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webErrorEvents bulk: %s", err)
     }
-    conn.webErrorTags, err = NewBulk(conn.c,
+    conn.webErrorTags, err = NewBulk(conn.c, conn.metrics,
         "public.errors_tags",
         "(session_id, message_id, error_id, key, value)",
         "($%d, $%d, $%d, $%d, $%d)",
@@ -176,7 +179,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webErrorTags bulk: %s", err)
     }
-    conn.webIssues, err = NewBulk(conn.c,
+    conn.webIssues, err = NewBulk(conn.c, conn.metrics,
         "issues",
         "(project_id, issue_id, type, context_string)",
         "($%d, $%d, $%d, $%d)",
@@ -184,7 +187,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webIssues bulk: %s", err)
     }
-    conn.webIssueEvents, err = NewBulk(conn.c,
+    conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics,
         "events_common.issues",
         "(session_id, issue_id, timestamp, seq_index, payload)",
         "($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
@@ -192,7 +195,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webIssueEvents bulk: %s", err)
     }
-    conn.webCustomEvents, err = NewBulk(conn.c,
+    conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics,
         "events_common.customs",
         "(session_id, seq_index, timestamp, name, payload, level)",
         "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -200,7 +203,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webCustomEvents bulk: %s", err)
     }
-    conn.webClickEvents, err = NewBulk(conn.c,
+    conn.webClickEvents, err = NewBulk(conn.c, conn.metrics,
         "events.clicks",
         "(session_id, message_id, timestamp, label, selector, url, path, hesitation)",
         "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
@@ -208,7 +211,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
     }
-    conn.webClickXYEvents, err = NewBulk(conn.c,
+    conn.webClickXYEvents, err = NewBulk(conn.c, conn.metrics,
         "events.clicks",
         "(session_id, message_id, timestamp, label, selector, url, path, hesitation, normalized_x, normalized_y)",
         "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d, $%d, $%d)",
@@ -216,7 +219,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
     }
-    conn.webNetworkRequest, err = NewBulk(conn.c,
+    conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics,
         "events_common.requests",
         "(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success, transfer_size)",
         "($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)",
@@ -224,7 +227,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err)
     }
-    conn.webCanvasNodes, err = NewBulk(conn.c,
+    conn.webCanvasNodes, err = NewBulk(conn.c, conn.metrics,
         "events.canvas_recordings",
         "(session_id, recording_id, timestamp)",
         "($%d, $%d, $%d)",
@@ -232,7 +235,7 @@ func (conn *BulkSet) initBulks() {
     if err != nil {
         conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err)
     }
-    conn.webTagTriggers, err = NewBulk(conn.c,
+    conn.webTagTriggers, err = NewBulk(conn.c, conn.metrics,
         "events.tags",
         "(session_id, timestamp, seq_index, tag_id)",
         "($%d, $%d, $%d, $%d)",

|
|||
|
||||
import (
|
||||
"context"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres/batch"
|
||||
"openreplay/backend/pkg/db/postgres/pool"
|
||||
|
|
@ -22,7 +23,7 @@ type Conn struct {
|
|||
chConn CH
|
||||
}
|
||||
|
||||
func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
|
||||
func NewConn(log logger.Logger, pool pool.Pool, ch CH, metrics database.Database) *Conn {
|
||||
if pool == nil {
|
||||
log.Fatal(context.Background(), "pg pool is empty")
|
||||
}
|
||||
|
|
@ -30,8 +31,8 @@ func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
|
|||
log: log,
|
||||
Pool: pool,
|
||||
chConn: ch,
|
||||
bulks: NewBulkSet(log, pool),
|
||||
batches: batch.NewBatchSet(log, pool),
|
||||
bulks: NewBulkSet(log, pool, metrics),
|
||||
batches: batch.NewBatchSet(log, pool, metrics),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -23,58 +23,12 @@ type Pool interface {
 }

 type poolImpl struct {
-    url  string
-    conn *pgxpool.Pool
+    url     string
+    conn    *pgxpool.Pool
+    metrics database.Database
 }

-func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
-    start := time.Now()
-    res, err := p.conn.Query(getTimeoutContext(), sql, args...)
-    method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
-    return res, err
-}
-
-func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
-    start := time.Now()
-    res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
-    method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
-    return res
-}
-
-func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
-    start := time.Now()
-    _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
-    method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
-    return err
-}
-
-func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
-    start := time.Now()
-    res := p.conn.SendBatch(getTimeoutContext(), b)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
-    database.IncreaseTotalRequests("sendBatch", "")
-    return res
-}
-
-func (p *poolImpl) Begin() (*Tx, error) {
-    start := time.Now()
-    tx, err := p.conn.Begin(context.Background())
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
-    database.IncreaseTotalRequests("begin", "")
-    return &Tx{tx}, err
-}
-
-func (p *poolImpl) Close() {
-    p.conn.Close()
-}
-
-func New(url string) (Pool, error) {
+func New(metrics database.Database, url string) (Pool, error) {
     if url == "" {
         return nil, errors.New("pg connection url is empty")
     }
@@ -83,24 +37,73 @@ func New(url string) (Pool, error) {
         return nil, fmt.Errorf("pgxpool.Connect error: %v", err)
     }
     res := &poolImpl{
-        url:  url,
-        conn: conn,
+        url:     url,
+        conn:    conn,
+        metrics: metrics,
     }
     return res, nil
 }

+func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
+    start := time.Now()
+    res, err := p.conn.Query(getTimeoutContext(), sql, args...)
+    method, table := methodName(sql)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    p.metrics.IncreaseTotalRequests(method, table)
+    return res, err
+}
+
+func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
+    start := time.Now()
+    res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
+    method, table := methodName(sql)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    p.metrics.IncreaseTotalRequests(method, table)
+    return res
+}
+
+func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
+    start := time.Now()
+    _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
+    method, table := methodName(sql)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    p.metrics.IncreaseTotalRequests(method, table)
+    return err
+}
+
+func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
+    start := time.Now()
+    res := p.conn.SendBatch(getTimeoutContext(), b)
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
+    p.metrics.IncreaseTotalRequests("sendBatch", "")
+    return res
+}
+
+func (p *poolImpl) Begin() (*Tx, error) {
+    start := time.Now()
+    tx, err := p.conn.Begin(context.Background())
+    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
+    p.metrics.IncreaseTotalRequests("begin", "")
+    return &Tx{tx, p.metrics}, err
+}
+
+func (p *poolImpl) Close() {
+    p.conn.Close()
+}
+
 // TX - start

 type Tx struct {
     pgx.Tx
+    metrics database.Database
 }

 func (tx *Tx) TxExec(sql string, args ...interface{}) error {
     start := time.Now()
     _, err := tx.Exec(context.Background(), sql, args...)
     method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    tx.metrics.IncreaseTotalRequests(method, table)
     return err
 }

@@ -108,24 +111,24 @@ func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
     start := time.Now()
     res := tx.QueryRow(context.Background(), sql, args...)
     method, table := methodName(sql)
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    database.IncreaseTotalRequests(method, table)
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    tx.metrics.IncreaseTotalRequests(method, table)
     return res
 }

 func (tx *Tx) TxRollback() error {
     start := time.Now()
     err := tx.Rollback(context.Background())
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
-    database.IncreaseTotalRequests("rollback", "")
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
+    tx.metrics.IncreaseTotalRequests("rollback", "")
     return err
 }

 func (tx *Tx) TxCommit() error {
     start := time.Now()
     err := tx.Commit(context.Background())
-    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
-    database.IncreaseTotalRequests("commit", "")
+    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
+    tx.metrics.IncreaseTotalRequests("commit", "")
     return err
 }

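Every pool call is instrumented the same way: time the operation, derive a `(method, table)` label pair from the SQL via `methodName`, then record a duration histogram and bump a counter. `methodName` exists in the real pool package but its body is not part of this diff, so the version below is a guessed, simplified stand-in; the rest mirrors the pattern visible above:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

type Database interface {
	RecordRequestDuration(durMillis float64, method, table string)
	IncreaseTotalRequests(method, table string)
}

// methodName is an assumption: extract the SQL verb and the first
// identifier after FROM/INTO/UPDATE for use as metric labels.
func methodName(sql string) (string, string) {
	fields := strings.Fields(strings.ToLower(sql))
	if len(fields) == 0 {
		return "unknown", ""
	}
	method, table := fields[0], ""
	for i, f := range fields {
		if (f == "from" || f == "into" || f == "update") && i+1 < len(fields) {
			table = fields[i+1]
			break
		}
	}
	return method, table
}

type stdoutMetrics struct{}

func (stdoutMetrics) RecordRequestDuration(ms float64, m, t string) { fmt.Println(ms, m, t) }
func (stdoutMetrics) IncreaseTotalRequests(m, t string)             { fmt.Println("+1", m, t) }

// query mirrors poolImpl.Query after the refactor: package-level
// database.* calls became calls on the injected p.metrics.
func query(metrics Database, sql string) {
	start := time.Now()
	// ... p.conn.Query(getTimeoutContext(), sql, args...) would run here ...
	method, table := methodName(sql)
	metrics.RecordRequestDuration(float64(time.Since(start).Milliseconds()), method, table)
	metrics.IncreaseTotalRequests(method, table)
}

func main() {
	query(stdoutMetrics{}, "SELECT * FROM sessions WHERE id = 1")
}
```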
@@ -2,6 +2,7 @@ package integrations

 import (
     "openreplay/backend/pkg/integrations/service"
+    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/server/tracer"
     "time"
@@ -23,7 +24,7 @@ type ServiceBuilder struct {
     IntegrationsAPI api.Handlers
 }

-func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, pgconn pool.Pool) (*ServiceBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServiceBuilder, error) {
     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         return nil, err
@@ -37,7 +38,7 @@ func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics w
     if err != nil {
         return nil, err
     }
-    auditrail, err := tracer.NewTracer(log, pgconn)
+    auditrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
     if err != nil {
         return nil, err
     }

|
|||
type sinkIteratorImpl struct {
|
||||
coreIterator MessageIterator
|
||||
handler MessageHandler
|
||||
metrics sink.Sink
|
||||
}
|
||||
|
||||
func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
|
||||
func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool, metrics sink.Sink) MessageIterator {
|
||||
iter := &sinkIteratorImpl{
|
||||
handler: messageHandler,
|
||||
metrics: metrics,
|
||||
}
|
||||
iter.coreIterator = NewMessageIterator(log, iter.handle, messageFilter, autoDecode)
|
||||
return iter
|
||||
|
|
@ -23,8 +25,8 @@ func (i *sinkIteratorImpl) handle(message Message) {
|
|||
}
|
||||
|
||||
func (i *sinkIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
|
||||
sink.RecordBatchSize(float64(len(batchData)))
|
||||
sink.IncreaseTotalBatches()
|
||||
i.metrics.RecordBatchSize(float64(len(batchData)))
|
||||
i.metrics.IncreaseTotalBatches()
|
||||
// Call core iterator
|
||||
i.coreIterator.Iterate(batchData, batchInfo)
|
||||
// Send batch end signal
|
||||
|
|
|
|||
|
|
@@ -1,22 +0,0 @@
-package analytics
-
-import (
-	"github.com/prometheus/client_golang/prometheus"
-
-	"openreplay/backend/pkg/metrics/common"
-)
-
-var cardCreated = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "card",
-		Name:      "created",
-		Help:      "Histogram for tracking card creation",
-		Buckets:   common.DefaultBuckets,
-	},
-)
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		cardCreated,
-	}
-}

@@ -2,71 +2,22 @@ package assets

 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"openreplay/backend/pkg/metrics/common"
-	"strconv"
 )

-var assetsProcessedSessions = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "assets",
-		Name:      "processed_total",
-		Help:      "A counter displaying the total count of processed assets.",
-	},
-)
-
-func IncreaseProcessesSessions() {
-	assetsProcessedSessions.Inc()
+type Assets interface {
+	IncreaseProcessesSessions()
+	IncreaseSavedSessions()
+	RecordDownloadDuration(durMillis float64, code int)
+	RecordUploadDuration(durMillis float64, isFailed bool)
+	List() []prometheus.Collector
 }

-var assetsSavedSessions = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "assets",
-		Name:      "saved_total",
-		Help:      "A counter displaying the total number of cached assets.",
-	},
-)
+type assetsImpl struct{}

-func IncreaseSavedSessions() {
-	assetsSavedSessions.Inc()
-}
+func New(serviceName string) Assets { return &assetsImpl{} }

-var assetsDownloadDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "assets",
-		Name:      "download_duration_seconds",
-		Help:      "A histogram displaying the duration of downloading for each asset in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"response_code"},
-)
-
-func RecordDownloadDuration(durMillis float64, code int) {
-	assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
-}
-
-var assetsUploadDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "assets",
-		Name:      "upload_s3_duration_seconds",
-		Help:      "A histogram displaying the duration of uploading to s3 for each asset in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"failed"},
-)
-
-func RecordUploadDuration(durMillis float64, isFailed bool) {
-	failed := "false"
-	if isFailed {
-		failed = "true"
-	}
-	assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		assetsProcessedSessions,
-		assetsSavedSessions,
-		assetsDownloadDuration,
-		assetsUploadDuration,
-	}
-}
+func (a *assetsImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (a *assetsImpl) IncreaseProcessesSessions() {}
+func (a *assetsImpl) IncreaseSavedSessions() {}
+func (a *assetsImpl) RecordDownloadDuration(durMillis float64, code int) {}
+func (a *assetsImpl) RecordUploadDuration(durMillis float64, isFailed bool) {}

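With assets metrics behind an interface, the open-source build can ship the no-op assetsImpl above while any other backend satisfies the same contract. A sketch of a Prometheus-backed implementation reusing the metric shapes from the removed package-level code (this struct is not part of the commit; buckets and registration are omitted for brevity):

package assets

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

// promAssets is a hypothetical metrics-backed Assets implementation.
type promAssets struct {
	processed, saved prometheus.Counter
	download, upload *prometheus.HistogramVec
}

func NewProm(serviceName string) Assets {
	return &promAssets{
		processed: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: serviceName, Name: "processed_total"}),
		saved: prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: serviceName, Name: "saved_total"}),
		download: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: serviceName, Name: "download_duration_seconds"},
			[]string{"response_code"}),
		upload: prometheus.NewHistogramVec(prometheus.HistogramOpts{
			Namespace: serviceName, Name: "upload_s3_duration_seconds"},
			[]string{"failed"}),
	}
}

func (a *promAssets) IncreaseProcessesSessions() { a.processed.Inc() }
func (a *promAssets) IncreaseSavedSessions()     { a.saved.Inc() }

func (a *promAssets) RecordDownloadDuration(durMillis float64, code int) {
	a.download.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
}

func (a *promAssets) RecordUploadDuration(durMillis float64, isFailed bool) {
	a.upload.WithLabelValues(strconv.FormatBool(isFailed)).Observe(durMillis / 1000.0)
}

func (a *promAssets) List() []prometheus.Collector {
	return []prometheus.Collector{a.processed, a.saved, a.download, a.upload}
}
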
@@ -2,7 +2,6 @@ package canvas

 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"openreplay/backend/pkg/metrics/common"
 )

 type Canvas interface {

@@ -18,175 +17,17 @@ type Canvas interface {
 	List() []prometheus.Collector
 }

-type canvasImpl struct {
-	canvasesImageSize            prometheus.Histogram
-	canvasesTotalSavedImages     prometheus.Counter
-	canvasesImagesPerCanvas      prometheus.Histogram
-	canvasesCanvasesPerSession   prometheus.Histogram
-	canvasesPreparingDuration    prometheus.Histogram
-	canvasesTotalCreatedArchives prometheus.Counter
-	canvasesArchivingDuration    prometheus.Histogram
-	canvasesArchiveSize          prometheus.Histogram
-	canvasesUploadingDuration    prometheus.Histogram
-}
+type canvasImpl struct{}

-func New(serviceName string) Canvas {
-	return &canvasImpl{
-		canvasesImageSize:            newImageSizeMetric(serviceName),
-		canvasesTotalSavedImages:     newTotalSavedImages(serviceName),
-		canvasesImagesPerCanvas:      newImagesPerCanvas(serviceName),
-		canvasesCanvasesPerSession:   newCanvasesPerSession(serviceName),
-		canvasesPreparingDuration:    newPreparingDuration(serviceName),
-		canvasesTotalCreatedArchives: newTotalCreatedArchives(serviceName),
-		canvasesArchivingDuration:    newArchivingDuration(serviceName),
-		canvasesArchiveSize:          newArchiveSize(serviceName),
-		canvasesUploadingDuration:    newUploadingDuration(serviceName),
-	}
-}
+func New(serviceName string) Canvas { return &canvasImpl{} }

-func (c *canvasImpl) List() []prometheus.Collector {
-	return []prometheus.Collector{
-		c.canvasesImageSize,
-		c.canvasesTotalSavedImages,
-		c.canvasesImagesPerCanvas,
-		c.canvasesCanvasesPerSession,
-		c.canvasesPreparingDuration,
-		c.canvasesTotalCreatedArchives,
-		c.canvasesArchivingDuration,
-		c.canvasesArchiveSize,
-		c.canvasesUploadingDuration,
-	}
-}
-
-func newImageSizeMetric(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "image_size_bytes",
-			Help:      "A histogram displaying the size of each canvas image in bytes.",
-			Buckets:   common.DefaultSizeBuckets,
-		},
-	)
-}
-
-func (c *canvasImpl) RecordCanvasImageSize(size float64) {
-	c.canvasesImageSize.Observe(size)
-}
-
-func newTotalSavedImages(serviceName string) prometheus.Counter {
-	return prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: serviceName,
-			Name:      "total_saved_images",
-			Help:      "A counter displaying the total number of saved images.",
-		},
-	)
-}
-
-func (c *canvasImpl) IncreaseTotalSavedImages() {
-	c.canvasesTotalSavedImages.Inc()
-}
-
-func newImagesPerCanvas(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "images_per_canvas",
-			Help:      "A histogram displaying the number of images per canvas.",
-			Buckets:   common.DefaultBuckets,
-		},
-	)
-}
-
-func (c *canvasImpl) RecordImagesPerCanvas(number float64) {
-	c.canvasesImagesPerCanvas.Observe(number)
-}
-
-func newCanvasesPerSession(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "canvases_per_session",
-			Help:      "A histogram displaying the number of canvases per session.",
-			Buckets:   common.DefaultBuckets,
-		},
-	)
-}
-
-func (c *canvasImpl) RecordCanvasesPerSession(number float64) {
-	c.canvasesCanvasesPerSession.Observe(number)
-}
-
-func newPreparingDuration(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "preparing_duration_seconds",
-			Help:      "A histogram displaying the duration of preparing the list of canvases for each session in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-	)
-}
-
-func (c *canvasImpl) RecordPreparingDuration(duration float64) {
-	c.canvasesPreparingDuration.Observe(duration)
-}
-
-func newTotalCreatedArchives(serviceName string) prometheus.Counter {
-	return prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: serviceName,
-			Name:      "total_created_archives",
-			Help:      "A counter displaying the total number of created canvas archives.",
-		},
-	)
-}
-
-func (c *canvasImpl) IncreaseTotalCreatedArchives() {
-	c.canvasesTotalCreatedArchives.Inc()
-}
-
-func newArchivingDuration(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "archiving_duration_seconds",
-			Help:      "A histogram displaying the duration of archiving for each canvas in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-	)
-}
-
-func (c *canvasImpl) RecordArchivingDuration(duration float64) {
-	c.canvasesArchivingDuration.Observe(duration)
-}
-
-func newArchiveSize(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "archive_size_bytes",
-			Help:      "A histogram displaying the size of each canvas archive in bytes.",
-			Buckets:   common.DefaultSizeBuckets,
-		},
-	)
-}
-
-func (c *canvasImpl) RecordArchiveSize(size float64) {
-	c.canvasesArchiveSize.Observe(size)
-}
-
-func newUploadingDuration(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "uploading_duration_seconds",
-			Help:      "A histogram displaying the duration of uploading for each canvas in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-	)
-}
-
-func (c *canvasImpl) RecordUploadingDuration(duration float64) {
-	c.canvasesUploadingDuration.Observe(duration)
-}
+func (c *canvasImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (c *canvasImpl) RecordCanvasImageSize(size float64) {}
+func (c *canvasImpl) IncreaseTotalSavedImages() {}
+func (c *canvasImpl) RecordImagesPerCanvas(number float64) {}
+func (c *canvasImpl) RecordCanvasesPerSession(number float64) {}
+func (c *canvasImpl) RecordPreparingDuration(duration float64) {}
+func (c *canvasImpl) IncreaseTotalCreatedArchives() {}
+func (c *canvasImpl) RecordArchivingDuration(duration float64) {}
+func (c *canvasImpl) RecordArchiveSize(size float64) {}
+func (c *canvasImpl) RecordUploadingDuration(duration float64) {}

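Worth noting why New still takes serviceName even though canvasImpl is now empty: in the Prometheus client, Namespace is joined into the fully-qualified metric name, so a non-noop implementation scoped by service keeps names like canvases_image_size_bytes distinct per service. A tiny illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// HistogramOpts{Namespace: serviceName, Name: "image_size_bytes"}
	// registers under the fully-qualified name namespace_name:
	fmt.Println(prometheus.BuildFQName("canvases", "", "image_size_bytes"))
	// Output: canvases_image_size_bytes
}
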
@@ -2,141 +2,32 @@ package database

 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"openreplay/backend/pkg/metrics/common"
 )

-var dbBatchElements = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "db",
-		Name:      "batch_size_elements",
-		Help:      "A histogram displaying the number of SQL commands in each batch.",
-		Buckets:   common.DefaultBuckets,
-	},
-)
-
-func RecordBatchElements(number float64) {
-	dbBatchElements.Observe(number)
+type Database interface {
+	RecordBatchElements(number float64)
+	RecordBatchInsertDuration(durMillis float64)
+	RecordBulkSize(size float64, db, table string)
+	RecordBulkElements(size float64, db, table string)
+	RecordBulkInsertDuration(durMillis float64, db, table string)
+	RecordRequestDuration(durMillis float64, method, table string)
+	IncreaseTotalRequests(method, table string)
+	IncreaseRedisRequests(method, table string)
+	RecordRedisRequestDuration(durMillis float64, method, table string)
+	List() []prometheus.Collector
 }

-var dbBatchInsertDuration = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "db",
-		Name:      "batch_insert_duration_seconds",
-		Help:      "A histogram displaying the duration of batch inserts in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-)
+type databaseImpl struct{}

-func RecordBatchInsertDuration(durMillis float64) {
-	dbBatchInsertDuration.Observe(durMillis / 1000.0)
-}
+func New(serviceName string) Database { return &databaseImpl{} }

-var dbBulkSize = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "db",
-		Name:      "bulk_size_bytes",
-		Help:      "A histogram displaying the bulk size in bytes.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-	[]string{"db", "table"},
-)
-
-func RecordBulkSize(size float64, db, table string) {
-	dbBulkSize.WithLabelValues(db, table).Observe(size)
-}
-
-var dbBulkElements = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "db",
-		Name:      "bulk_size_elements",
-		Help:      "A histogram displaying the size of data set in each bulk.",
-		Buckets:   common.DefaultBuckets,
-	},
-	[]string{"db", "table"},
-)
-
-func RecordBulkElements(size float64, db, table string) {
-	dbBulkElements.WithLabelValues(db, table).Observe(size)
-}
-
-var dbBulkInsertDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "db",
-		Name:      "bulk_insert_duration_seconds",
-		Help:      "A histogram displaying the duration of bulk inserts in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"db", "table"},
-)
-
-func RecordBulkInsertDuration(durMillis float64, db, table string) {
-	dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0)
-}
-
-var dbRequestDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "db",
-		Name:      "request_duration_seconds",
-		Help:      "A histogram displaying the duration of each sql request in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"method", "table"},
-)
-
-func RecordRequestDuration(durMillis float64, method, table string) {
-	dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
-}
-
-var dbTotalRequests = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Namespace: "db",
-		Name:      "requests_total",
-		Help:      "A counter showing the total number of all SQL requests.",
-	},
-	[]string{"method", "table"},
-)
-
-func IncreaseTotalRequests(method, table string) {
-	dbTotalRequests.WithLabelValues(method, table).Inc()
-}
-
-var cacheRedisRequests = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Namespace: "cache",
-		Name:      "redis_requests_total",
-		Help:      "A counter showing the total number of all Redis requests.",
-	},
-	[]string{"method", "table"},
-)
-
-func IncreaseRedisRequests(method, table string) {
-	cacheRedisRequests.WithLabelValues(method, table).Inc()
-}
-
-var cacheRedisRequestDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "cache",
-		Name:      "redis_request_duration_seconds",
-		Help:      "A histogram displaying the duration of each Redis request in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"method", "table"},
-)
-
-func RecordRedisRequestDuration(durMillis float64, method, table string) {
-	cacheRedisRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		dbBatchElements,
-		dbBatchInsertDuration,
-		dbBulkSize,
-		dbBulkElements,
-		dbBulkInsertDuration,
-		dbRequestDuration,
-		dbTotalRequests,
-		cacheRedisRequests,
-		cacheRedisRequestDuration,
-	}
-}
+func (d *databaseImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (d *databaseImpl) RecordBatchElements(number float64) {}
+func (d *databaseImpl) RecordBatchInsertDuration(durMillis float64) {}
+func (d *databaseImpl) RecordBulkSize(size float64, db, table string) {}
+func (d *databaseImpl) RecordBulkElements(size float64, db, table string) {}
+func (d *databaseImpl) RecordBulkInsertDuration(durMillis float64, db, table string) {}
+func (d *databaseImpl) RecordRequestDuration(durMillis float64, method, table string) {}
+func (d *databaseImpl) IncreaseTotalRequests(method, table string) {}
+func (d *databaseImpl) IncreaseRedisRequests(method, table string) {}
+func (d *databaseImpl) RecordRedisRequestDuration(durMillis float64, method, table string) {}

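A hedged sketch of how a caller times a query against this interface; the wrapper name is illustrative, and the real call sites are the Tx methods earlier in this diff:

package main

import (
	"fmt"
	"time"
)

// Database is trimmed here to the two methods used below; the full
// interface appears in the hunk above.
type Database interface {
	RecordRequestDuration(durMillis float64, method, table string)
	IncreaseTotalRequests(method, table string)
}

type stdoutDB struct{}

func (stdoutDB) RecordRequestDuration(d float64, method, table string) {
	fmt.Printf("db_request_duration{method=%q,table=%q} %.1fms\n", method, table, d)
}

func (stdoutDB) IncreaseTotalRequests(method, table string) {
	fmt.Printf("db_requests_total{method=%q,table=%q} +1\n", method, table)
}

// timed shows the call-site pattern used by the Tx methods in this commit:
// measure, then record duration and count with (method, table) labels.
func timed(metrics Database, method, table string, op func() error) error {
	start := time.Now()
	err := op()
	metrics.RecordRequestDuration(float64(time.Since(start).Milliseconds()), method, table)
	metrics.IncreaseTotalRequests(method, table)
	return err
}

func main() {
	_ = timed(stdoutDB{}, "select", "sessions", func() error {
		time.Sleep(5 * time.Millisecond) // stand-in for the real query
		return nil
	})
}
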
@@ -2,50 +2,20 @@ package ender

 import "github.com/prometheus/client_golang/prometheus"

-var enderActiveSessions = prometheus.NewGauge(
-	prometheus.GaugeOpts{
-		Namespace: "ender",
-		Name:      "sessions_active",
-		Help:      "A gauge displaying the number of active (live) sessions.",
-	},
-)
-
-func IncreaseActiveSessions() {
-	enderActiveSessions.Inc()
+type Ender interface {
+	IncreaseActiveSessions()
+	DecreaseActiveSessions()
+	IncreaseClosedSessions()
+	IncreaseTotalSessions()
+	List() []prometheus.Collector
 }

-func DecreaseActiveSessions() {
-	enderActiveSessions.Dec()
-}
+type enderImpl struct{}

-var enderClosedSessions = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "ender",
-		Name:      "sessions_closed",
-		Help:      "A counter displaying the number of closed sessions (sent SessionEnd).",
-	},
-)
+func New(serviceName string) Ender { return &enderImpl{} }

-func IncreaseClosedSessions() {
-	enderClosedSessions.Inc()
-}
-
-var enderTotalSessions = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "ender",
-		Name:      "sessions_total",
-		Help:      "A counter displaying the number of all processed sessions.",
-	},
-)
-
-func IncreaseTotalSessions() {
-	enderTotalSessions.Inc()
-}
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		enderActiveSessions,
-		enderClosedSessions,
-		enderTotalSessions,
-	}
-}
+func (e *enderImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (e *enderImpl) IncreaseActiveSessions() {}
+func (e *enderImpl) DecreaseActiveSessions() {}
+func (e *enderImpl) IncreaseClosedSessions() {}
+func (e *enderImpl) IncreaseTotalSessions() {}

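Ender keeps paired Increase/Decrease methods because the removed metric was a gauge rather than a counter, and any real implementation should preserve that semantics. A minimal illustration (not part of the commit; registration omitted):

package main

import "github.com/prometheus/client_golang/prometheus"

// A gauge (unlike a counter) can go down again, which is why the
// interface exposes both Increase and Decrease for active sessions.
var enderActiveSessions = prometheus.NewGauge(prometheus.GaugeOpts{
	Namespace: "ender",
	Name:      "sessions_active",
	Help:      "Number of active (live) sessions.",
})

func onSessionStart() { enderActiveSessions.Inc() }
func onSessionEnd()   { enderActiveSessions.Dec() }

func main() {
	onSessionStart()
	onSessionEnd()
}
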
@@ -2,65 +2,16 @@ package heuristics

 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"openreplay/backend/pkg/metrics/common"
-	"strconv"
 )

-var heuristicsTotalEvents = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Namespace: "heuristics",
-		Name:      "events_total",
-		Help:      "A counter displaying the number of all processed events",
-	},
-	[]string{"type"},
-)
-
-func IncreaseTotalEvents(eventType string) {
-	heuristicsTotalEvents.WithLabelValues(eventType).Inc()
+type Heuristics interface {
+	IncreaseTotalEvents(eventType string)
+	List() []prometheus.Collector
 }

-var heuristicsRequestSize = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "heuristics",
-		Name:      "request_size_bytes",
-		Help:      "A histogram displaying the size of each HTTP request in bytes.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-	[]string{"url", "response_code"},
-)
+type heuristicsImpl struct{}

-func RecordRequestSize(size float64, url string, code int) {
-	heuristicsRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
-}
+func New(serviceName string) Heuristics { return &heuristicsImpl{} }

-var heuristicsRequestDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "heuristics",
-		Name:      "request_duration_seconds",
-		Help:      "A histogram displaying the duration of each HTTP request in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"url", "response_code"},
-)
-
-func RecordRequestDuration(durMillis float64, url string, code int) {
-	heuristicsRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
-}
-
-var heuristicsTotalRequests = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "heuristics",
-		Name:      "requests_total",
-		Help:      "A counter displaying the number all HTTP requests.",
-	},
-)
-
-func IncreaseTotalRequests() {
-	heuristicsTotalRequests.Inc()
-}
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		heuristicsTotalEvents,
-	}
-}
+func (h *heuristicsImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (h *heuristicsImpl) IncreaseTotalEvents(eventType string) {}

@@ -2,7 +2,6 @@ package images

 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"openreplay/backend/pkg/metrics/common"
 )

 type Images interface {

@@ -18,174 +17,17 @@ type Images interface {
 	List() []prometheus.Collector
 }

-type imagesImpl struct {
-	originalArchiveSize               prometheus.Histogram
-	originalArchiveExtractionDuration prometheus.Histogram
-	totalSavedArchives                prometheus.Counter
-	savingImageDuration               prometheus.Histogram
-	totalSavedImages                  prometheus.Counter
-	totalCreatedArchives              prometheus.Counter
-	archivingDuration                 prometheus.Histogram
-	archiveSize                       prometheus.Histogram
-	uploadingDuration                 prometheus.Histogram
-}
+type imagesImpl struct{}

-func New(serviceName string) Images {
-	return &imagesImpl{
-		originalArchiveSize:               newOriginalArchiveSize(serviceName),
-		originalArchiveExtractionDuration: newOriginalArchiveExtractionDuration(serviceName),
-		totalSavedArchives:                newTotalSavedArchives(serviceName),
-		savingImageDuration:               newSavingImageDuration(serviceName),
-		totalSavedImages:                  newTotalSavedImages(serviceName),
-		totalCreatedArchives:              newTotalCreatedArchives(serviceName),
-		archivingDuration:                 newArchivingDuration(serviceName),
-		archiveSize:                       newArchiveSize(serviceName),
-		uploadingDuration:                 newUploadingDuration(serviceName),
-	}
-}
+func New(serviceName string) Images { return &imagesImpl{} }

-func (i *imagesImpl) List() []prometheus.Collector {
-	return []prometheus.Collector{
-		i.originalArchiveSize,
-		i.originalArchiveExtractionDuration,
-		i.totalSavedArchives,
-		i.savingImageDuration,
-		i.totalSavedImages,
-		i.totalCreatedArchives,
-		i.archivingDuration,
-		i.archiveSize,
-		i.uploadingDuration,
-	}
-}
-
-func newOriginalArchiveSize(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "original_archive_size_bytes",
-			Help:      "A histogram displaying the original archive size in bytes.",
-			Buckets:   common.DefaultSizeBuckets,
-		},
-	)
-}
-
-func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {
-	i.archiveSize.Observe(size)
-}
-
-func newOriginalArchiveExtractionDuration(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "original_archive_extraction_duration_seconds",
-			Help:      "A histogram displaying the duration of extracting the original archive.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-	)
-}
-
-func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {
-	i.originalArchiveExtractionDuration.Observe(duration)
-}
-
-func newTotalSavedArchives(serviceName string) prometheus.Counter {
-	return prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: serviceName,
-			Name:      "total_saved_archives",
-			Help:      "A counter displaying the total number of saved original archives.",
-		},
-	)
-}
-
-func (i *imagesImpl) IncreaseTotalSavedArchives() {
-	i.totalSavedArchives.Inc()
-}
-
-func newSavingImageDuration(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "saving_image_duration_seconds",
-			Help:      "A histogram displaying the duration of saving each image in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-	)
-}
-
-func (i *imagesImpl) RecordSavingImageDuration(duration float64) {
-	i.savingImageDuration.Observe(duration)
-}
-
-func newTotalSavedImages(serviceName string) prometheus.Counter {
-	return prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: serviceName,
-			Name:      "total_saved_images",
-			Help:      "A counter displaying the total number of saved images.",
-		},
-	)
-}
-
-func (i *imagesImpl) IncreaseTotalSavedImages() {
-	i.totalSavedImages.Inc()
-}
-
-func newTotalCreatedArchives(serviceName string) prometheus.Counter {
-	return prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: serviceName,
-			Name:      "total_created_archives",
-			Help:      "A counter displaying the total number of created archives.",
-		},
-	)
-}
-
-func (i *imagesImpl) IncreaseTotalCreatedArchives() {
-	i.totalCreatedArchives.Inc()
-}
-
-func newArchivingDuration(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "archiving_duration_seconds",
-			Help:      "A histogram displaying the duration of archiving each session in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-	)
-}
-
-func (i *imagesImpl) RecordArchivingDuration(duration float64) {
-	i.archivingDuration.Observe(duration)
-}
-
-func newArchiveSize(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "archive_size_bytes",
-			Help:      "A histogram displaying the session's archive size in bytes.",
-			Buckets:   common.DefaultSizeBuckets,
-		},
-	)
-}
-
-func (i *imagesImpl) RecordArchiveSize(size float64) {
-	i.archiveSize.Observe(size)
-}
-
-func newUploadingDuration(serviceName string) prometheus.Histogram {
-	return prometheus.NewHistogram(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "uploading_duration_seconds",
-			Help:      "A histogram displaying the duration of uploading each session's archive to S3 in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-	)
-}
-
-func (i *imagesImpl) RecordUploadingDuration(duration float64) {
-	i.uploadingDuration.Observe(duration)
-}
+func (i *imagesImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {}
+func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {}
+func (i *imagesImpl) IncreaseTotalSavedArchives() {}
+func (i *imagesImpl) RecordSavingImageDuration(duration float64) {}
+func (i *imagesImpl) IncreaseTotalSavedImages() {}
+func (i *imagesImpl) IncreaseTotalCreatedArchives() {}
+func (i *imagesImpl) RecordArchivingDuration(duration float64) {}
+func (i *imagesImpl) RecordArchiveSize(size float64) {}
+func (i *imagesImpl) RecordUploadingDuration(duration float64) {}

@@ -1,38 +1,10 @@
 package metrics

 import (
-	"context"
-	"net/http"
-
 	"github.com/prometheus/client_golang/prometheus"
-	"github.com/prometheus/client_golang/prometheus/collectors"
-	"github.com/prometheus/client_golang/prometheus/promhttp"

 	"openreplay/backend/pkg/logger"
 )

-type MetricServer struct {
-	registry *prometheus.Registry
-}
+type MetricServer struct{}

-func New(log logger.Logger, cs []prometheus.Collector) {
-	registry := prometheus.NewRegistry()
-	// Add go runtime metrics and process collectors.
-	registry.MustRegister(
-		collectors.NewGoCollector(),
-		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
-	)
-	// Add extra metrics
-	registry.MustRegister(cs...)
-	// Expose /metrics HTTP endpoint using the created custom registry.
-	http.Handle(
-		"/metrics", promhttp.HandlerFor(
-			registry,
-			promhttp.HandlerOpts{
-				EnableOpenMetrics: true,
-			}),
-	)
-	go func() {
-		log.Error(context.Background(), "%v", http.ListenAndServe(":8888", nil))
-	}()
-}
+func New(log logger.Logger, cs []prometheus.Collector) {}

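With New reduced to a no-op, nothing serves the /metrics endpoint on :8888 any more in this build. For a deployment that still wants scraping, the deleted behavior can be recreated externally; the sketch below is equivalent to the removed code but uses the standard library logger instead of the project's:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// serveMetrics mirrors the removed exporter: a private registry with Go
// runtime and process collectors plus the application's collectors.
func serveMetrics(cs []prometheus.Collector) {
	registry := prometheus.NewRegistry()
	registry.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
	)
	registry.MustRegister(cs...)
	http.Handle("/metrics", promhttp.HandlerFor(registry,
		promhttp.HandlerOpts{EnableOpenMetrics: true}))
	go func() { log.Println(http.ListenAndServe(":8888", nil)) }()
}

func main() {
	serveMetrics(nil)
	select {} // block so the serving goroutine keeps running
}
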
@@ -2,184 +2,40 @@ package sink

 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"openreplay/backend/pkg/metrics/common"
 )

-var sinkMessageSize = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "sink",
-		Name:      "message_size_bytes",
-		Help:      "A histogram displaying the size of each message in bytes.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-)
-
-func RecordMessageSize(size float64) {
-	sinkMessageSize.Observe(size)
+type Sink interface {
+	RecordMessageSize(size float64)
+	IncreaseWrittenMessages()
+	IncreaseTotalMessages()
+	RecordBatchSize(size float64)
+	IncreaseTotalBatches()
+	RecordWrittenBytes(size float64, fileType string)
+	IncreaseTotalWrittenBytes(size float64, fileType string)
+	IncreaseCachedAssets()
+	DecreaseCachedAssets()
+	IncreaseSkippedAssets()
+	IncreaseTotalAssets()
+	RecordAssetSize(size float64)
+	RecordProcessAssetDuration(durMillis float64)
+	List() []prometheus.Collector
 }

-var sinkWrittenMessages = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "sink",
-		Name:      "messages_written",
-		Help:      "A counter displaying the total number of all written messages.",
-	},
-)
+type sinkImpl struct{}

-func IncreaseWrittenMessages() {
-	sinkWrittenMessages.Inc()
-}
+func New(serviceName string) Sink { return &sinkImpl{} }

-var sinkTotalMessages = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "sink",
-		Name:      "messages_total",
-		Help:      "A counter displaying the total number of all processed messages.",
-	},
-)
-
-func IncreaseTotalMessages() {
-	sinkTotalMessages.Inc()
-}
-
-var sinkBatchSize = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "sink",
-		Name:      "batch_size_bytes",
-		Help:      "A histogram displaying the size of each batch in bytes.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-)
-
-func RecordBatchSize(size float64) {
-	sinkBatchSize.Observe(size)
-}
-
-var sinkTotalBatches = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "sink",
-		Name:      "batches_total",
-		Help:      "A counter displaying the total number of all written batches.",
-	},
-)
-
-func IncreaseTotalBatches() {
-	sinkTotalBatches.Inc()
-}
-
-var sinkWrittenBytes = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "sink",
-		Name:      "written_bytes",
-		Help:      "A histogram displaying the size of buffer in bytes written to session file.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordWrittenBytes(size float64, fileType string) {
-	if size == 0 {
-		return
-	}
-	sinkWrittenBytes.WithLabelValues(fileType).Observe(size)
-	IncreaseTotalWrittenBytes(size, fileType)
-}
-
-var sinkTotalWrittenBytes = prometheus.NewCounterVec(
-	prometheus.CounterOpts{
-		Namespace: "sink",
-		Name:      "written_bytes_total",
-		Help:      "A counter displaying the total number of bytes written to all session files.",
-	},
-	[]string{"file_type"},
-)
-
-func IncreaseTotalWrittenBytes(size float64, fileType string) {
-	if size == 0 {
-		return
-	}
-	sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size)
-}
-
-var sinkCachedAssets = prometheus.NewGauge(
-	prometheus.GaugeOpts{
-		Namespace: "sink",
-		Name:      "assets_cached",
-		Help:      "A gauge displaying the current number of cached assets.",
-	},
-)
-
-func IncreaseCachedAssets() {
-	sinkCachedAssets.Inc()
-}
-
-func DecreaseCachedAssets() {
-	sinkCachedAssets.Dec()
-}
-
-var sinkSkippedAssets = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "sink",
-		Name:      "assets_skipped",
-		Help:      "A counter displaying the total number of all skipped assets.",
-	},
-)
-
-func IncreaseSkippedAssets() {
-	sinkSkippedAssets.Inc()
-}
-
-var sinkTotalAssets = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "sink",
-		Name:      "assets_total",
-		Help:      "A counter displaying the total number of all processed assets.",
-	},
-)
-
-func IncreaseTotalAssets() {
-	sinkTotalAssets.Inc()
-}
-
-var sinkAssetSize = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "sink",
-		Name:      "asset_size_bytes",
-		Help:      "A histogram displaying the size of each asset in bytes.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-)
-
-func RecordAssetSize(size float64) {
-	sinkAssetSize.Observe(size)
-}
-
-var sinkProcessAssetDuration = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "sink",
-		Name:      "asset_process_duration_seconds",
-		Help:      "A histogram displaying the duration of processing for each asset in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-)
-
-func RecordProcessAssetDuration(durMillis float64) {
-	sinkProcessAssetDuration.Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		sinkMessageSize,
-		sinkWrittenMessages,
-		sinkTotalMessages,
-		sinkBatchSize,
-		sinkTotalBatches,
-		sinkWrittenBytes,
-		sinkTotalWrittenBytes,
-		sinkCachedAssets,
-		sinkSkippedAssets,
-		sinkTotalAssets,
-		sinkAssetSize,
-		sinkProcessAssetDuration,
-	}
-}
+func (s *sinkImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (s *sinkImpl) RecordMessageSize(size float64) {}
+func (s *sinkImpl) IncreaseWrittenMessages() {}
+func (s *sinkImpl) IncreaseTotalMessages() {}
+func (s *sinkImpl) RecordBatchSize(size float64) {}
+func (s *sinkImpl) IncreaseTotalBatches() {}
+func (s *sinkImpl) RecordWrittenBytes(size float64, fileType string) {}
+func (s *sinkImpl) IncreaseTotalWrittenBytes(size float64, fileType string) {}
+func (s *sinkImpl) IncreaseCachedAssets() {}
+func (s *sinkImpl) DecreaseCachedAssets() {}
+func (s *sinkImpl) IncreaseSkippedAssets() {}
+func (s *sinkImpl) IncreaseTotalAssets() {}
+func (s *sinkImpl) RecordAssetSize(size float64) {}
+func (s *sinkImpl) RecordProcessAssetDuration(durMillis float64) {}

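One behavior worth preserving in any real Sink implementation: the removed RecordWrittenBytes skipped zero-sized writes and also fed the cumulative written-bytes counter. A sketch of that coupling (not shipped in this commit; registration omitted):

package main

import "github.com/prometheus/client_golang/prometheus"

var (
	writtenBytes = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace: "sink", Name: "written_bytes",
	}, []string{"file_type"})
	totalWrittenBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "sink", Name: "written_bytes_total",
	}, []string{"file_type"})
)

// recordWrittenBytes mirrors the removed logic: zero-sized writes are
// ignored, and every observation also advances the cumulative counter.
func recordWrittenBytes(size float64, fileType string) {
	if size == 0 {
		return
	}
	writtenBytes.WithLabelValues(fileType).Observe(size)
	totalWrittenBytes.WithLabelValues(fileType).Add(size)
}

func main() {
	recordWrittenBytes(4096, "dom")
	recordWrittenBytes(0, "devtools") // no-op by design
}
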
@@ -2,148 +2,34 @@ package spot

 import (
 	"github.com/prometheus/client_golang/prometheus"
-
-	"openreplay/backend/pkg/metrics/common"
 )

-var spotOriginalVideoSize = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "spot",
-		Name:      "original_video_size_bytes",
-		Help:      "A histogram displaying the size of each original video in bytes.",
-		Buckets:   common.VideoSizeBuckets,
-	},
-)
-
-func RecordOriginalVideoSize(size float64) {
-	spotOriginalVideoSize.Observe(size)
+type Spot interface {
+	RecordOriginalVideoSize(size float64)
+	RecordCroppedVideoSize(size float64)
+	IncreaseVideosTotal()
+	IncreaseVideosCropped()
+	IncreaseVideosTranscoded()
+	RecordOriginalVideoDownloadDuration(durMillis float64)
+	RecordCroppingDuration(durMillis float64)
+	RecordCroppedVideoUploadDuration(durMillis float64)
+	RecordTranscodingDuration(durMillis float64)
+	RecordTranscodedVideoUploadDuration(durMillis float64)
+	List() []prometheus.Collector
 }

-var spotCroppedVideoSize = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "spot",
-		Name:      "cropped_video_size_bytes",
-		Help:      "A histogram displaying the size of each cropped video in bytes.",
-		Buckets:   common.VideoSizeBuckets,
-	},
-)
+type spotImpl struct{}

-func RecordCroppedVideoSize(size float64) {
-	spotCroppedVideoSize.Observe(size)
-}
+func New(serviceName string) Spot { return &spotImpl{} }

-var spotVideosTotal = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "spot",
-		Name:      "videos_total",
-		Help:      "A counter displaying the total number of all processed videos.",
-	},
-)
-
-func IncreaseVideosTotal() {
-	spotVideosTotal.Inc()
-}
-
-var spotVideosCropped = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "spot",
-		Name:      "videos_cropped_total",
-		Help:      "A counter displaying the total number of all cropped videos.",
-	},
-)
-
-func IncreaseVideosCropped() {
-	spotVideosCropped.Inc()
-}
-
-var spotVideosTranscoded = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "spot",
-		Name:      "videos_transcoded_total",
-		Help:      "A counter displaying the total number of all transcoded videos.",
-	},
-)
-
-func IncreaseVideosTranscoded() {
-	spotVideosTranscoded.Inc()
-}
-
-var spotOriginalVideoDownloadDuration = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "spot",
-		Name:      "original_video_download_duration_seconds",
-		Help:      "A histogram displaying the duration of downloading each original video in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-)
-
-func RecordOriginalVideoDownloadDuration(durMillis float64) {
-	spotOriginalVideoDownloadDuration.Observe(durMillis / 1000.0)
-}
-
-var spotCroppingDuration = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "spot",
-		Name:      "cropping_duration_seconds",
-		Help:      "A histogram displaying the duration of cropping each video in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-)
-
-func RecordCroppingDuration(durMillis float64) {
-	spotCroppingDuration.Observe(durMillis / 1000.0)
-}
-
-var spotCroppedVideoUploadDuration = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "spot",
-		Name:      "cropped_video_upload_duration_seconds",
-		Help:      "A histogram displaying the duration of uploading each cropped video in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-)
-
-func RecordCroppedVideoUploadDuration(durMillis float64) {
-	spotCroppedVideoUploadDuration.Observe(durMillis / 1000.0)
-}
-
-var spotTranscodingDuration = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "spot",
-		Name:      "transcoding_duration_seconds",
-		Help:      "A histogram displaying the duration of transcoding each video in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-)
-
-func RecordTranscodingDuration(durMillis float64) {
-	spotTranscodingDuration.Observe(durMillis / 1000.0)
-}
-
-var spotTranscodedVideoUploadDuration = prometheus.NewHistogram(
-	prometheus.HistogramOpts{
-		Namespace: "spot",
-		Name:      "transcoded_video_upload_duration_seconds",
-		Help:      "A histogram displaying the duration of uploading each transcoded video in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-)
-
-func RecordTranscodedVideoUploadDuration(durMillis float64) {
-	spotTranscodedVideoUploadDuration.Observe(durMillis / 1000.0)
-}
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		spotOriginalVideoSize,
-		spotCroppedVideoSize,
-		spotVideosTotal,
-		spotVideosCropped,
-		spotVideosTranscoded,
-		spotOriginalVideoDownloadDuration,
-		spotCroppingDuration,
-		spotCroppedVideoUploadDuration,
-		spotTranscodingDuration,
-		spotTranscodedVideoUploadDuration,
-	}
-}
+func (s *spotImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (s *spotImpl) RecordOriginalVideoSize(size float64) {}
+func (s *spotImpl) RecordCroppedVideoSize(size float64) {}
+func (s *spotImpl) IncreaseVideosTotal() {}
+func (s *spotImpl) IncreaseVideosCropped() {}
+func (s *spotImpl) IncreaseVideosTranscoded() {}
+func (s *spotImpl) RecordOriginalVideoDownloadDuration(durMillis float64) {}
+func (s *spotImpl) RecordCroppingDuration(durMillis float64) {}
+func (s *spotImpl) RecordCroppedVideoUploadDuration(durMillis float64) {}
+func (s *spotImpl) RecordTranscodingDuration(durMillis float64) {}
+func (s *spotImpl) RecordTranscodedVideoUploadDuration(durMillis float64) {}

@@ -2,154 +2,34 @@ package storage

 import (
 	"github.com/prometheus/client_golang/prometheus"
-	"openreplay/backend/pkg/metrics/common"
 )

-var storageSessionSize = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "session_size_bytes",
-		Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSessionSize(fileSize float64, fileType string) {
-	storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
+type Storage interface {
+	RecordSessionSize(fileSize float64, fileType string)
+	IncreaseStorageTotalSessions()
+	RecordSkippedSessionSize(fileSize float64, fileType string)
+	IncreaseStorageTotalSkippedSessions()
+	RecordSessionReadDuration(durMillis float64, fileType string)
+	RecordSessionSortDuration(durMillis float64, fileType string)
+	RecordSessionEncryptionDuration(durMillis float64, fileType string)
+	RecordSessionCompressDuration(durMillis float64, fileType string)
+	RecordSessionUploadDuration(durMillis float64, fileType string)
+	RecordSessionCompressionRatio(ratio float64, fileType string)
+	List() []prometheus.Collector
 }

-var storageTotalSessions = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "storage",
-		Name:      "sessions_total",
-		Help:      "A counter displaying the total number of all processed sessions.",
-	},
-)
+type storageImpl struct{}

-func IncreaseStorageTotalSessions() {
-	storageTotalSessions.Inc()
-}
+func New(serviceName string) Storage { return &storageImpl{} }

-var storageSkippedSessionSize = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "session_size_bytes",
-		Help:      "A histogram displaying the size of each skipped session file in bytes.",
-		Buckets:   common.DefaultSizeBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSkippedSessionSize(fileSize float64, fileType string) {
-	storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
-}
-
-var storageTotalSkippedSessions = prometheus.NewCounter(
-	prometheus.CounterOpts{
-		Namespace: "storage",
-		Name:      "sessions_skipped_total",
-		Help:      "A counter displaying the total number of all skipped sessions because of the size limits.",
-	},
-)
-
-func IncreaseStorageTotalSkippedSessions() {
-	storageTotalSkippedSessions.Inc()
-}
-
-var storageSessionReadDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "read_duration_seconds",
-		Help:      "A histogram displaying the duration of reading for each session in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSessionReadDuration(durMillis float64, fileType string) {
-	storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionSortDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "sort_duration_seconds",
-		Help:      "A histogram displaying the duration of sorting for each session in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSessionSortDuration(durMillis float64, fileType string) {
-	storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "encryption_duration_seconds",
-		Help:      "A histogram displaying the duration of encoding for each session in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
-	storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionCompressDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "compress_duration_seconds",
-		Help:      "A histogram displaying the duration of compressing for each session in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSessionCompressDuration(durMillis float64, fileType string) {
-	storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionUploadDuration = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "upload_duration_seconds",
-		Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSessionUploadDuration(durMillis float64, fileType string) {
-	storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
-}
-
-var storageSessionCompressionRatio = prometheus.NewHistogramVec(
-	prometheus.HistogramOpts{
-		Namespace: "storage",
-		Name:      "compression_ratio",
-		Help:      "A histogram displaying the compression ratio of mob files for each session.",
-		Buckets:   common.DefaultDurationBuckets,
-	},
-	[]string{"file_type"},
-)
-
-func RecordSessionCompressionRatio(ratio float64, fileType string) {
-	storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
-}
-
-func List() []prometheus.Collector {
-	return []prometheus.Collector{
-		storageSessionSize,
-		storageTotalSessions,
-		storageSessionReadDuration,
-		storageSessionSortDuration,
-		storageSessionEncryptionDuration,
-		storageSessionCompressDuration,
-		storageSessionUploadDuration,
-		storageSessionCompressionRatio,
-	}
-}
+func (s *storageImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (s *storageImpl) RecordSessionSize(fileSize float64, fileType string) {}
+func (s *storageImpl) IncreaseStorageTotalSessions() {}
+func (s *storageImpl) RecordSkippedSessionSize(fileSize float64, fileType string) {}
+func (s *storageImpl) IncreaseStorageTotalSkippedSessions() {}
+func (s *storageImpl) RecordSessionReadDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionSortDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionEncryptionDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionCompressDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionUploadDuration(durMillis float64, fileType string) {}
+func (s *storageImpl) RecordSessionCompressionRatio(ratio float64, fileType string) {}

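All the storage stages share one shape: a millisecond duration plus a file_type label (read, sort, encryption, compress, upload). A small helper pattern a real implementation could use to avoid repeating the timing boilerplate; this helper is hypothetical and not part of the commit:

package main

import (
	"fmt"
	"time"
)

// stage times one pipeline step and reports it through a single callback,
// matching the Record*Duration(durMillis, fileType) signatures above.
func stage(record func(durMillis float64, fileType string), fileType string, fn func()) {
	start := time.Now()
	fn()
	record(float64(time.Since(start).Milliseconds()), fileType)
}

func main() {
	record := func(d float64, ft string) { fmt.Printf("stage took %.0fms for %s\n", d, ft) }
	stage(record, "dom", func() { time.Sleep(3 * time.Millisecond) })
}
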
@ -1,155 +0,0 @@
|
|||
package videostorage
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"openreplay/backend/pkg/metrics/common"
|
||||
)
|
||||
|
||||
var storageSessionSize = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "session_size_bytes",
|
||||
Help: "A histogram displaying the size of each session file in bytes prior to any manipulation.",
|
||||
Buckets: common.DefaultSizeBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionSize(fileSize float64, fileType string) {
|
||||
storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
|
||||
}
|
||||
|
||||
var storageTotalSessions = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sessions_total",
|
||||
Help: "A counter displaying the total number of all processed sessions.",
|
||||
},
|
||||
)
|
||||
|
||||
func IncreaseStorageTotalSessions() {
|
||||
storageTotalSessions.Inc()
|
||||
}
|
||||
|
||||
var storageSkippedSessionSize = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "session_size_bytes",
|
||||
Help: "A histogram displaying the size of each skipped session file in bytes.",
|
||||
Buckets: common.DefaultSizeBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSkippedSessionSize(fileSize float64, fileType string) {
|
||||
storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
|
||||
}
|
||||
|
||||
var storageTotalSkippedSessions = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sessions_skipped_total",
|
||||
Help: "A counter displaying the total number of all skipped sessions because of the size limits.",
|
||||
},
|
||||
)
|
||||
|
||||
func IncreaseStorageTotalSkippedSessions() {
|
||||
storageTotalSkippedSessions.Inc()
|
||||
}
|
||||
|
||||
var storageSessionReadDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "read_duration_seconds",
|
||||
Help: "A histogram displaying the duration of reading for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionReadDuration(durMillis float64, fileType string) {
|
||||
storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionSortDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sort_duration_seconds",
|
||||
Help: "A histogram displaying the duration of sorting for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionSortDuration(durMillis float64, fileType string) {
|
||||
storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "encryption_duration_seconds",
|
||||
Help: "A histogram displaying the duration of encoding for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
|
||||
storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionCompressDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "compress_duration_seconds",
|
||||
Help: "A histogram displaying the duration of compressing for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionCompressDuration(durMillis float64, fileType string) {
|
||||
storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionUploadDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "upload_duration_seconds",
|
||||
Help: "A histogram displaying the duration of uploading to s3 for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionUploadDuration(durMillis float64, fileType string) {
|
||||
storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionCompressionRatio = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "compression_ratio",
|
||||
Help: "A histogram displaying the compression ratio of mob files for each session.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionCompressionRatio(ratio float64, fileType string) {
|
||||
storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
|
||||
}
|
||||
|
||||
func List() []prometheus.Collector {
|
||||
return []prometheus.Collector{
|
||||
storageSessionSize,
|
||||
storageTotalSessions,
|
||||
storageSessionReadDuration,
|
||||
storageSessionSortDuration,
|
||||
storageSessionEncryptionDuration,
|
||||
storageSessionCompressDuration,
|
||||
storageSessionUploadDuration,
|
||||
storageSessionCompressionRatio,
|
||||
}
|
||||
}
|
||||
|
|
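A caller sketch, not part of the commit: it shows the convention used by every Record* helper above, where the caller measures in milliseconds and the helper divides by 1000 so the histogram is exported in seconds. The import path and the readSessionFile stand-in are assumptions.

package main

import (
	"time"

	"openreplay/backend/pkg/metrics/storage" // assumed location of the helpers above
)

// readSessionFile is a hypothetical stand-in for reading one session's mob file.
func readSessionFile() { time.Sleep(10 * time.Millisecond) }

func main() {
	start := time.Now()
	readSessionFile()
	// Milliseconds in, seconds out: the helper does the conversion.
	storage.RecordSessionReadDuration(float64(time.Since(start).Milliseconds()), "dom")
}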
@@ -1,11 +1,7 @@
 package web
 
 import (
-	"strconv"
-
 	"github.com/prometheus/client_golang/prometheus"
-
-	"openreplay/backend/pkg/metrics/common"
 )
 
 type Web interface {
@@ -15,70 +11,11 @@ type Web interface {
 	List() []prometheus.Collector
 }
 
-type webImpl struct {
-	httpRequestSize     *prometheus.HistogramVec
-	httpRequestDuration *prometheus.HistogramVec
-	httpTotalRequests   prometheus.Counter
-}
+type webImpl struct{}
 
-func New(serviceName string) Web {
-	return &webImpl{
-		httpRequestSize:     newRequestSizeMetric(serviceName),
-		httpRequestDuration: newRequestDurationMetric(serviceName),
-		httpTotalRequests:   newTotalRequestsMetric(serviceName),
-	}
-}
+func New(serviceName string) Web { return &webImpl{} }
 
-func (w *webImpl) List() []prometheus.Collector {
-	return []prometheus.Collector{
-		w.httpRequestSize,
-		w.httpRequestDuration,
-		w.httpTotalRequests,
-	}
-}
-
-func newRequestSizeMetric(serviceName string) *prometheus.HistogramVec {
-	return prometheus.NewHistogramVec(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "request_size_bytes",
-			Help:      "A histogram displaying the size of each HTTP request in bytes.",
-			Buckets:   common.DefaultSizeBuckets,
-		},
-		[]string{"url", "response_code"},
-	)
-}
-
-func (w *webImpl) RecordRequestSize(size float64, url string, code int) {
-	w.httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
-}
-
-func newRequestDurationMetric(serviceName string) *prometheus.HistogramVec {
-	return prometheus.NewHistogramVec(
-		prometheus.HistogramOpts{
-			Namespace: serviceName,
-			Name:      "request_duration_seconds",
-			Help:      "A histogram displaying the duration of each HTTP request in seconds.",
-			Buckets:   common.DefaultDurationBuckets,
-		},
-		[]string{"url", "response_code"},
-	)
-}
-
-func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {
-	w.httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
-}
-
-func newTotalRequestsMetric(serviceName string) prometheus.Counter {
-	return prometheus.NewCounter(
-		prometheus.CounterOpts{
-			Namespace: serviceName,
-			Name:      "requests_total",
-			Help:      "A counter displaying the number of all HTTP requests.",
-		},
-	)
-}
-
-func (w *webImpl) IncreaseTotalRequests() {
-	w.httpTotalRequests.Inc()
-}
+func (w *webImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
+func (w *webImpl) RecordRequestSize(size float64, url string, code int) {}
+func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {}
+func (w *webImpl) IncreaseTotalRequests() {}
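A sketch, not from the commit, of why replacing the implementation with no-ops is safe: handlers program against the Web interface, so the same middleware compiles against both the full implementation and this stub. The instrument helper and the hardcoded status code are assumptions for illustration.

package main

import (
	"net/http"
	"time"

	"openreplay/backend/pkg/metrics/web"
)

// instrument wraps a handler with the Web metrics interface; with the stub
// above the metric calls compile to no-ops, with the full implementation
// they feed the request histograms and counter.
func instrument(metrics web.Web, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		next(w, r)
		metrics.IncreaseTotalRequests()
		// Response code capture is elided in this sketch.
		metrics.RecordRequestDuration(float64(time.Since(start).Milliseconds()), r.URL.Path, http.StatusOK)
	}
}

func main() {
	m := web.New("spot")
	http.HandleFunc("/ping", instrument(m, func(w http.ResponseWriter, r *http.Request) {}))
	// Server startup omitted.
}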
@@ -11,6 +11,8 @@ import (
 	"openreplay/backend/pkg/metrics/database"
 )
 
+var ErrDisabledCache = errors.New("cache is disabled")
+
 type Cache interface {
 	Set(project *Project) error
 	GetByID(projectID uint32) (*Project, error)
@@ -18,10 +20,16 @@ type Cache interface {
 }
 
 type cacheImpl struct {
-	db *redis.Client
+	db      *redis.Client
+	metrics database.Database
 }
 
-var ErrDisabledCache = errors.New("cache is disabled")
+func NewCache(db *redis.Client, metrics database.Database) Cache {
+	return &cacheImpl{
+		db:      db,
+		metrics: metrics,
+	}
+}
 
 func (c *cacheImpl) Set(project *Project) error {
 	if c.db == nil {
@@ -38,8 +46,8 @@ func (c *cacheImpl) Set(project *Project) error {
 	if _, err = c.db.Redis.Set(fmt.Sprintf("project:key:%s", project.ProjectKey), projectBytes, time.Minute*10).Result(); err != nil {
 		return err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
-	database.IncreaseRedisRequests("set", "project")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
+	c.metrics.IncreaseRedisRequests("set", "project")
 	return nil
 }
@@ -52,8 +60,8 @@ func (c *cacheImpl) GetByID(projectID uint32) (*Project, error) {
 	if err != nil {
 		return nil, err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
-	database.IncreaseRedisRequests("get", "project")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
+	c.metrics.IncreaseRedisRequests("get", "project")
 	project := &Project{}
 	if err = json.Unmarshal([]byte(result), project); err != nil {
 		return nil, err
@@ -70,15 +78,11 @@ func (c *cacheImpl) GetByKey(projectKey string) (*Project, error) {
 	if err != nil {
 		return nil, err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
-	database.IncreaseRedisRequests("get", "project")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
+	c.metrics.IncreaseRedisRequests("get", "project")
 	project := &Project{}
 	if err = json.Unmarshal([]byte(result), project); err != nil {
 		return nil, err
 	}
 	return project, nil
 }
-
-func NewCache(db *redis.Client) Cache {
-	return &cacheImpl{db: db}
-}
@@ -3,6 +3,7 @@ package projects
 import (
 	"context"
 	"errors"
+	"openreplay/backend/pkg/metrics/database"
 	"time"
 
 	"openreplay/backend/pkg/cache"
@@ -24,8 +25,8 @@ type projectsImpl struct {
 	projectsByKeys cache.Cache
 }
 
-func New(log logger.Logger, db pool.Pool, redis *redis.Client) Projects {
-	cl := NewCache(redis)
+func New(log logger.Logger, db pool.Pool, redis *redis.Client, metrics database.Database) Projects {
+	cl := NewCache(redis, metrics)
 	return &projectsImpl{
 		log: log,
 		db:  db,
@@ -2,6 +2,7 @@ package tracer
 
 import (
 	"net/http"
+	"openreplay/backend/pkg/metrics/database"
 
 	db "openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/logger"
@@ -14,7 +15,7 @@ type Tracer interface {
 
 type tracerImpl struct{}
 
-func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
+func NewTracer(log logger.Logger, conn db.Pool, metrics database.Database) (Tracer, error) {
 	return &tracerImpl{}, nil
 }
@@ -3,6 +3,7 @@ package sessions
 import (
 	"errors"
 	"openreplay/backend/pkg/db/redis"
+	"openreplay/backend/pkg/metrics/database"
 )
 
 type cacheImpl struct{}
@@ -25,6 +26,6 @@ func (c *cacheImpl) Get(sessionID uint64) (*Session, error) {
 
 var ErrDisabledCache = errors.New("cache is disabled")
 
-func NewCache(db *redis.Client) Cache {
+func NewCache(db *redis.Client, metrics database.Database) Cache {
 	return &cacheImpl{}
 }
@@ -3,6 +3,7 @@ package sessions
 import (
 	"context"
 	"fmt"
+	"openreplay/backend/pkg/metrics/database"
 
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/db/redis"
@@ -38,12 +39,12 @@ type sessionsImpl struct {
 	projects projects.Projects
 }
 
-func New(log logger.Logger, db pool.Pool, proj projects.Projects, redis *redis.Client) Sessions {
+func New(log logger.Logger, db pool.Pool, proj projects.Projects, redis *redis.Client, metrics database.Database) Sessions {
 	return &sessionsImpl{
 		log:      log,
-		cache:    NewInMemoryCache(log, NewCache(redis)),
+		cache:    NewInMemoryCache(log, NewCache(redis, metrics)),
 		storage:  NewStorage(db),
-		updates:  NewSessionUpdates(log, db),
+		updates:  NewSessionUpdates(log, db, metrics),
 		projects: proj,
 	}
 }
@@ -27,13 +27,15 @@ type updatesImpl struct {
 	log     logger.Logger
 	db      pool.Pool
 	updates map[uint64]*sessionUpdate
+	metrics database.Database
 }
 
-func NewSessionUpdates(log logger.Logger, db pool.Pool) Updates {
+func NewSessionUpdates(log logger.Logger, db pool.Pool, metrics database.Database) Updates {
 	return &updatesImpl{
 		log:     log,
 		db:      db,
 		updates: make(map[uint64]*sessionUpdate),
+		metrics: metrics,
 	}
 }
 
@@ -94,7 +96,7 @@ func (u *updatesImpl) Commit() {
 		}
 	}
 	// Record batch size
-	database.RecordBatchElements(float64(b.Len()))
+	u.metrics.RecordBatchElements(float64(b.Len()))
 
 	start := time.Now()
 
@@ -121,7 +123,7 @@ func (u *updatesImpl) Commit() {
 			}
 		}
 	}
-	database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
+	u.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
 	u.updates = make(map[uint64]*sessionUpdate)
 }
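A sketch of the measurement pattern Commit now uses through the injected Database instance. The commitBatch function is a hypothetical stand-in for updatesImpl.Commit; only the two metric calls mirror the hunks above.

package main

import (
	"time"

	"openreplay/backend/pkg/metrics/database"
)

// commitBatch records the batch element count up front, then times the
// insert, matching the order used in updatesImpl.Commit.
func commitBatch(metrics database.Database, statements []string) {
	metrics.RecordBatchElements(float64(len(statements)))
	start := time.Now()
	// ... the batch would be sent to Postgres here ...
	metrics.RecordBatchInsertDuration(float64(time.Since(start).Milliseconds()))
}

func main() {
	m := database.New("db")
	commitBatch(m, []string{"UPDATE sessions SET duration = 42"})
}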
@@ -1,12 +1,14 @@
 package spot
 
 import (
+	"openreplay/backend/pkg/metrics/database"
 	"time"
 
 	"openreplay/backend/internal/config/spot"
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/flakeid"
 	"openreplay/backend/pkg/logger"
+	spotMetrics "openreplay/backend/pkg/metrics/spot"
 	"openreplay/backend/pkg/metrics/web"
 	"openreplay/backend/pkg/objectstorage/store"
 	"openreplay/backend/pkg/server/api"
@@ -26,16 +28,16 @@ type ServicesBuilder struct {
 	SpotsAPI api.Handlers
 }
 
-func NewServiceBuilder(log logger.Logger, cfg *spot.Config, webMetrics web.Web, pgconn pool.Pool, prefix string) (*ServicesBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *spot.Config, webMetrics web.Web, spotMetrics spotMetrics.Spot, dbMetrics database.Database, pgconn pool.Pool, prefix string) (*ServicesBuilder, error) {
 	objStore, err := store.NewStore(&cfg.ObjectsConfig)
 	if err != nil {
 		return nil, err
 	}
 	flaker := flakeid.NewFlaker(cfg.WorkerID)
 	spots := service.NewSpots(log, pgconn, flaker)
-	transcoder := transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots)
+	transcoder := transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots, spotMetrics)
 	keys := keys.NewKeys(log, pgconn)
-	auditrail, err := tracer.NewTracer(log, pgconn)
+	auditrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
 	if err != nil {
 		return nil, err
 	}
@@ -16,7 +16,7 @@ import (
 	"openreplay/backend/internal/config/spot"
 	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/logger"
-	metrics "openreplay/backend/pkg/metrics/spot"
+	spotMetrics "openreplay/backend/pkg/metrics/spot"
 	"openreplay/backend/pkg/objectstorage"
 	workers "openreplay/backend/pkg/pool"
 	"openreplay/backend/pkg/spot/service"
@@ -39,9 +39,10 @@ type transcoderImpl struct {
 	spots            service.Spots
 	prepareWorkers   workers.WorkerPool
 	transcodeWorkers workers.WorkerPool
+	metrics          spotMetrics.Spot
 }
 
-func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, conn pool.Pool, spots service.Spots) Transcoder {
+func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, conn pool.Pool, spots service.Spots, metrics spotMetrics.Spot) Transcoder {
 	tnsc := &transcoderImpl{
 		cfg: cfg,
 		log: log,
@@ -114,7 +115,7 @@ func (t *transcoderImpl) doneTask(task *Task) {
 }
 
 func (t *transcoderImpl) process(task *Task) {
-	metrics.IncreaseVideosTotal()
+	t.metrics.IncreaseVideosTotal()
 	//spotID := task.SpotID
 	t.log.Info(context.Background(), "Processing spot %s", task.SpotID)
 
@@ -200,11 +201,11 @@ func (t *transcoderImpl) downloadSpotVideo(spotID uint64, path string) error {
 	if fileInfo, err := originVideo.Stat(); err != nil {
 		t.log.Error(context.Background(), "Failed to get file info: %v", err)
 	} else {
-		metrics.RecordOriginalVideoSize(float64(fileInfo.Size()))
+		t.metrics.RecordOriginalVideoSize(float64(fileInfo.Size()))
 	}
 	originVideo.Close()
 
-	metrics.RecordOriginalVideoDownloadDuration(time.Since(start).Seconds())
+	t.metrics.RecordOriginalVideoDownloadDuration(time.Since(start).Seconds())
 
 	t.log.Info(context.Background(), "Saved origin video to disk, spot: %d in %v sec", spotID, time.Since(start).Seconds())
 	return nil
@@ -227,8 +228,8 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
 	if err != nil {
 		return fmt.Errorf("failed to execute command: %v, stderr: %v", err, stderr.String())
 	}
-	metrics.IncreaseVideosCropped()
-	metrics.RecordCroppingDuration(time.Since(start).Seconds())
+	t.metrics.IncreaseVideosCropped()
+	t.metrics.RecordCroppingDuration(time.Since(start).Seconds())
 
 	t.log.Info(context.Background(), "Cropped spot %d in %v", spotID, time.Since(start).Seconds())
 
@@ -246,7 +247,7 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
 	if fileInfo, err := video.Stat(); err != nil {
 		t.log.Error(context.Background(), "Failed to get file info: %v", err)
 	} else {
-		metrics.RecordCroppedVideoSize(float64(fileInfo.Size()))
+		t.metrics.RecordCroppedVideoSize(float64(fileInfo.Size()))
 	}
 
 	err = t.objStorage.Upload(video, fmt.Sprintf("%d/video.webm", spotID), "video/webm", objectstorage.NoContentEncoding, objectstorage.NoCompression)
@@ -254,7 +255,7 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
 		return fmt.Errorf("failed to upload cropped video: %v", err)
 	}
 
-	metrics.RecordCroppedVideoUploadDuration(time.Since(start).Seconds())
+	t.metrics.RecordCroppedVideoUploadDuration(time.Since(start).Seconds())
 
 	t.log.Info(context.Background(), "Uploaded cropped spot %d in %v", spotID, time.Since(start).Seconds())
 	return nil
@@ -279,8 +280,8 @@ func (t *transcoderImpl) transcodeSpotVideo(spotID uint64, path string) (string,
 		t.log.Error(context.Background(), "Failed to execute command: %v, stderr: %v", err, stderr.String())
 		return "", err
 	}
-	metrics.IncreaseVideosTranscoded()
-	metrics.RecordTranscodingDuration(time.Since(start).Seconds())
+	t.metrics.IncreaseVideosTranscoded()
+	t.metrics.RecordTranscodingDuration(time.Since(start).Seconds())
 	t.log.Info(context.Background(), "Transcoded spot %d in %v", spotID, time.Since(start).Seconds())
 
 	start = time.Now()
@@ -327,7 +328,7 @@ func (t *transcoderImpl) transcodeSpotVideo(spotID uint64, path string) (string,
 			return "", err
 		}
 	}
-	metrics.RecordTranscodedVideoUploadDuration(time.Since(start).Seconds())
+	t.metrics.RecordTranscodedVideoUploadDuration(time.Since(start).Seconds())
 
 	t.log.Info(context.Background(), "Uploaded chunks for spot %d in %v", spotID, time.Since(start).Seconds())
 	return strings.Join(lines, "\n"), nil
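A minimal sketch of the refactoring pattern the transcoder hunks apply: the package-level metrics import becomes a struct field, so tests can pass a fake and two services can use differently namespaced instances. It assumes the spot metrics package exposes the same New(serviceName) constructor as the other metrics packages in this commit.

package main

import (
	spotMetrics "openreplay/backend/pkg/metrics/spot"
)

type worker struct {
	metrics spotMetrics.Spot // was: package-level `metrics.` calls
}

func (w *worker) process() {
	w.metrics.IncreaseVideosTotal() // same call site, now on the field
}

func main() {
	w := &worker{metrics: spotMetrics.New("spot")} // New(serviceName) is assumed
	w.process()
}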
106	ee/backend/pkg/metrics/assets/metrics.go	Normal file
@@ -0,0 +1,106 @@
package assets

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"

	"openreplay/backend/pkg/metrics/common"
)

type Assets interface {
	IncreaseProcessesSessions()
	IncreaseSavedSessions()
	RecordDownloadDuration(durMillis float64, code int)
	RecordUploadDuration(durMillis float64, isFailed bool)
	List() []prometheus.Collector
}

type assetsImpl struct {
	assetsProcessedSessions prometheus.Counter
	assetsSavedSessions     prometheus.Counter
	assetsDownloadDuration  *prometheus.HistogramVec
	assetsUploadDuration    *prometheus.HistogramVec
}

func New(serviceName string) Assets {
	return &assetsImpl{
		assetsProcessedSessions: newProcessedSessions(serviceName),
		assetsSavedSessions:     newSavedSessions(serviceName),
		assetsDownloadDuration:  newDownloadDuration(serviceName),
		assetsUploadDuration:    newUploadDuration(serviceName),
	}
}

func (a *assetsImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		a.assetsProcessedSessions,
		a.assetsSavedSessions,
		a.assetsDownloadDuration,
		a.assetsUploadDuration,
	}
}

func newProcessedSessions(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "processed_total",
			Help:      "A counter displaying the total count of processed assets.",
		},
	)
}

func (a *assetsImpl) IncreaseProcessesSessions() {
	a.assetsProcessedSessions.Inc()
}

func newSavedSessions(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "saved_total",
			Help:      "A counter displaying the total number of cached assets.",
		},
	)
}

func (a *assetsImpl) IncreaseSavedSessions() {
	a.assetsSavedSessions.Inc()
}

func newDownloadDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "download_duration_seconds",
			Help:      "A histogram displaying the duration of downloading for each asset in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"response_code"},
	)
}

func (a *assetsImpl) RecordDownloadDuration(durMillis float64, code int) {
	a.assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
}

func newUploadDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "upload_s3_duration_seconds",
			Help:      "A histogram displaying the duration of uploading to S3 for each asset in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"failed"},
	)
}

func (a *assetsImpl) RecordUploadDuration(durMillis float64, isFailed bool) {
	failed := "false"
	if isFailed {
		failed = "true"
	}
	a.assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
}
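A usage sketch for the new Assets metrics, assuming a hypothetical fetch step inside the cacher: the HTTP response code and the failed/succeeded upload outcome become label values on the two histograms.

package main

import (
	"net/http"
	"time"

	assetsMetrics "openreplay/backend/pkg/metrics/assets"
)

func main() {
	m := assetsMetrics.New("assets")

	start := time.Now()
	resp, err := http.Get("https://example.com/style.css") // hypothetical asset URL
	code := 0                                              // 0 signals a transport-level failure
	if err == nil {
		code = resp.StatusCode
		resp.Body.Close()
	}
	m.RecordDownloadDuration(float64(time.Since(start).Milliseconds()), code)

	start = time.Now()
	uploadFailed := err != nil // hypothetical upload outcome
	m.RecordUploadDuration(float64(time.Since(start).Milliseconds()), uploadFailed)
}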
193	ee/backend/pkg/metrics/canvas/metrics.go	Normal file
@@ -0,0 +1,193 @@
package canvas

import (
	"github.com/prometheus/client_golang/prometheus"

	"openreplay/backend/pkg/metrics/common"
)

type Canvas interface {
	RecordCanvasImageSize(size float64)
	IncreaseTotalSavedImages()
	RecordImagesPerCanvas(number float64)
	RecordCanvasesPerSession(number float64)
	RecordPreparingDuration(duration float64)
	IncreaseTotalCreatedArchives()
	RecordArchivingDuration(duration float64)
	RecordArchiveSize(size float64)
	RecordUploadingDuration(duration float64)
	List() []prometheus.Collector
}

type canvasImpl struct {
	canvasesImageSize            prometheus.Histogram
	canvasesTotalSavedImages     prometheus.Counter
	canvasesImagesPerCanvas      prometheus.Histogram
	canvasesCanvasesPerSession   prometheus.Histogram
	canvasesPreparingDuration    prometheus.Histogram
	canvasesTotalCreatedArchives prometheus.Counter
	canvasesArchivingDuration    prometheus.Histogram
	canvasesArchiveSize          prometheus.Histogram
	canvasesUploadingDuration    prometheus.Histogram
}

func New(serviceName string) Canvas {
	return &canvasImpl{
		canvasesImageSize:            newImageSizeMetric(serviceName),
		canvasesTotalSavedImages:     newTotalSavedImages(serviceName),
		canvasesImagesPerCanvas:      newImagesPerCanvas(serviceName),
		canvasesCanvasesPerSession:   newCanvasesPerSession(serviceName),
		canvasesPreparingDuration:    newPreparingDuration(serviceName),
		canvasesTotalCreatedArchives: newTotalCreatedArchives(serviceName),
		canvasesArchivingDuration:    newArchivingDuration(serviceName),
		canvasesArchiveSize:          newArchiveSize(serviceName),
		canvasesUploadingDuration:    newUploadingDuration(serviceName),
	}
}

func (c *canvasImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		c.canvasesImageSize,
		c.canvasesTotalSavedImages,
		c.canvasesImagesPerCanvas,
		c.canvasesCanvasesPerSession,
		c.canvasesPreparingDuration,
		c.canvasesTotalCreatedArchives,
		c.canvasesArchivingDuration,
		c.canvasesArchiveSize,
		c.canvasesUploadingDuration,
	}
}

func newImageSizeMetric(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "image_size_bytes",
			Help:      "A histogram displaying the size of each canvas image in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
	)
}

func (c *canvasImpl) RecordCanvasImageSize(size float64) {
	c.canvasesImageSize.Observe(size)
}

func newTotalSavedImages(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "total_saved_images",
			Help:      "A counter displaying the total number of saved images.",
		},
	)
}

func (c *canvasImpl) IncreaseTotalSavedImages() {
	c.canvasesTotalSavedImages.Inc()
}

func newImagesPerCanvas(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "images_per_canvas",
			Help:      "A histogram displaying the number of images per canvas.",
			Buckets:   common.DefaultBuckets,
		},
	)
}

func (c *canvasImpl) RecordImagesPerCanvas(number float64) {
	c.canvasesImagesPerCanvas.Observe(number)
}

func newCanvasesPerSession(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "canvases_per_session",
			Help:      "A histogram displaying the number of canvases per session.",
			Buckets:   common.DefaultBuckets,
		},
	)
}

func (c *canvasImpl) RecordCanvasesPerSession(number float64) {
	c.canvasesCanvasesPerSession.Observe(number)
}

func newPreparingDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "preparing_duration_seconds",
			Help:      "A histogram displaying the duration of preparing the list of canvases for each session in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (c *canvasImpl) RecordPreparingDuration(duration float64) {
	c.canvasesPreparingDuration.Observe(duration)
}

func newTotalCreatedArchives(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "total_created_archives",
			Help:      "A counter displaying the total number of created canvas archives.",
		},
	)
}

func (c *canvasImpl) IncreaseTotalCreatedArchives() {
	c.canvasesTotalCreatedArchives.Inc()
}

func newArchivingDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "archiving_duration_seconds",
			Help:      "A histogram displaying the duration of archiving for each canvas in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (c *canvasImpl) RecordArchivingDuration(duration float64) {
	c.canvasesArchivingDuration.Observe(duration)
}

func newArchiveSize(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "archive_size_bytes",
			Help:      "A histogram displaying the size of each canvas archive in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
	)
}

func (c *canvasImpl) RecordArchiveSize(size float64) {
	c.canvasesArchiveSize.Observe(size)
}

func newUploadingDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "uploading_duration_seconds",
			Help:      "A histogram displaying the duration of uploading for each canvas in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (c *canvasImpl) RecordUploadingDuration(duration float64) {
	c.canvasesUploadingDuration.Observe(duration)
}
200	ee/backend/pkg/metrics/database/metrics.go	Normal file
@@ -0,0 +1,200 @@
package database

import (
	"github.com/prometheus/client_golang/prometheus"

	"openreplay/backend/pkg/metrics/common"
)

type Database interface {
	RecordBatchElements(number float64)
	RecordBatchInsertDuration(durMillis float64)
	RecordBulkSize(size float64, db, table string)
	RecordBulkElements(size float64, db, table string)
	RecordBulkInsertDuration(durMillis float64, db, table string)
	RecordRequestDuration(durMillis float64, method, table string)
	IncreaseTotalRequests(method, table string)
	IncreaseRedisRequests(method, table string)
	RecordRedisRequestDuration(durMillis float64, method, table string)
	List() []prometheus.Collector
}

type databaseImpl struct {
	dbBatchElements           prometheus.Histogram
	dbBatchInsertDuration     prometheus.Histogram
	dbBulkSize                *prometheus.HistogramVec
	dbBulkElements            *prometheus.HistogramVec
	dbBulkInsertDuration      *prometheus.HistogramVec
	dbRequestDuration         *prometheus.HistogramVec
	dbTotalRequests           *prometheus.CounterVec
	cacheRedisRequests        *prometheus.CounterVec
	cacheRedisRequestDuration *prometheus.HistogramVec
}

func New(serviceName string) Database {
	return &databaseImpl{
		dbBatchElements:           newBatchElements(serviceName),
		dbBatchInsertDuration:     newBatchInsertDuration(serviceName),
		dbBulkSize:                newBulkSize(serviceName),
		dbBulkElements:            newBulkElements(serviceName),
		dbBulkInsertDuration:      newBulkInsertDuration(serviceName),
		dbRequestDuration:         newRequestDuration(serviceName),
		dbTotalRequests:           newTotalRequests(serviceName),
		cacheRedisRequests:        newRedisRequests(serviceName),
		cacheRedisRequestDuration: newRedisRequestDuration(serviceName),
	}
}

func (d *databaseImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		d.dbBatchElements,
		d.dbBatchInsertDuration,
		d.dbBulkSize,
		d.dbBulkElements,
		d.dbBulkInsertDuration,
		d.dbRequestDuration,
		d.dbTotalRequests,
		d.cacheRedisRequests,
		d.cacheRedisRequestDuration,
	}
}

func newBatchElements(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "batch_size_elements",
			Help:      "A histogram displaying the number of SQL commands in each batch.",
			Buckets:   common.DefaultBuckets,
		},
	)
}

func (d *databaseImpl) RecordBatchElements(number float64) {
	d.dbBatchElements.Observe(number)
}

func newBatchInsertDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "batch_insert_duration_seconds",
			Help:      "A histogram displaying the duration of batch inserts in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (d *databaseImpl) RecordBatchInsertDuration(durMillis float64) {
	d.dbBatchInsertDuration.Observe(durMillis / 1000.0)
}

func newBulkSize(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "bulk_size_bytes",
			Help:      "A histogram displaying the bulk size in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
		[]string{"db", "table"},
	)
}

func (d *databaseImpl) RecordBulkSize(size float64, db, table string) {
	d.dbBulkSize.WithLabelValues(db, table).Observe(size)
}

func newBulkElements(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "bulk_size_elements",
			Help:      "A histogram displaying the number of elements in each bulk.",
			Buckets:   common.DefaultBuckets,
		},
		[]string{"db", "table"},
	)
}

func (d *databaseImpl) RecordBulkElements(size float64, db, table string) {
	d.dbBulkElements.WithLabelValues(db, table).Observe(size)
}

func newBulkInsertDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "bulk_insert_duration_seconds",
			Help:      "A histogram displaying the duration of bulk inserts in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"db", "table"},
	)
}

func (d *databaseImpl) RecordBulkInsertDuration(durMillis float64, db, table string) {
	d.dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0)
}

func newRequestDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "request_duration_seconds",
			Help:      "A histogram displaying the duration of each SQL request in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"method", "table"},
	)
}

func (d *databaseImpl) RecordRequestDuration(durMillis float64, method, table string) {
	d.dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
}

func newTotalRequests(serviceName string) *prometheus.CounterVec {
	return prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "requests_total",
			Help:      "A counter showing the total number of all SQL requests.",
		},
		[]string{"method", "table"},
	)
}

func (d *databaseImpl) IncreaseTotalRequests(method, table string) {
	d.dbTotalRequests.WithLabelValues(method, table).Inc()
}

func newRedisRequests(serviceName string) *prometheus.CounterVec {
	return prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "redis_requests_total",
			Help:      "A counter showing the total number of all Redis requests.",
		},
		[]string{"method", "table"},
	)
}

func (d *databaseImpl) IncreaseRedisRequests(method, table string) {
	d.cacheRedisRequests.WithLabelValues(method, table).Inc()
}

func newRedisRequestDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "redis_request_duration_seconds",
			Help:      "A histogram displaying the duration of each Redis request in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"method", "table"},
	)
}

func (d *databaseImpl) RecordRedisRequestDuration(durMillis float64, method, table string) {
	d.cacheRedisRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
}
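A sketch of how a bulk flush might feed the three bulk metrics just defined. The payload and row counts are hypothetical; the label values follow the db/table scheme of the interface.

package main

import (
	"time"

	"openreplay/backend/pkg/metrics/database"
)

func main() {
	m := database.New("db")

	start := time.Now()
	payloadBytes, rows := 64*1024, 500 // hypothetical bulk statistics
	// ... the bulk would be flushed to the database here ...
	m.RecordBulkSize(float64(payloadBytes), "postgres", "events")
	m.RecordBulkElements(float64(rows), "postgres", "events")
	m.RecordBulkInsertDuration(float64(time.Since(start).Milliseconds()), "postgres", "events")
}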
79	ee/backend/pkg/metrics/ender/metrics.go	Normal file
@@ -0,0 +1,79 @@
package ender

import "github.com/prometheus/client_golang/prometheus"

type Ender interface {
	IncreaseActiveSessions()
	DecreaseActiveSessions()
	IncreaseClosedSessions()
	IncreaseTotalSessions()
	List() []prometheus.Collector
}

type enderImpl struct {
	activeSessions prometheus.Gauge
	closedSessions prometheus.Counter
	totalSessions  prometheus.Counter
}

func New(serviceName string) Ender {
	return &enderImpl{
		activeSessions: newActiveSessions(serviceName),
		closedSessions: newClosedSessions(serviceName),
		totalSessions:  newTotalSessions(serviceName),
	}
}

func (e *enderImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		e.activeSessions,
		e.closedSessions,
		e.totalSessions,
	}
}

func newActiveSessions(serviceName string) prometheus.Gauge {
	return prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: serviceName,
			Name:      "sessions_active",
			Help:      "A gauge displaying the number of active (live) sessions.",
		},
	)
}

func (e *enderImpl) IncreaseActiveSessions() {
	e.activeSessions.Inc()
}

func (e *enderImpl) DecreaseActiveSessions() {
	e.activeSessions.Dec()
}

func newClosedSessions(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "sessions_closed",
			Help:      "A counter displaying the number of closed sessions (sent SessionEnd).",
		},
	)
}

func (e *enderImpl) IncreaseClosedSessions() {
	e.closedSessions.Inc()
}

func newTotalSessions(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "sessions_total",
			Help:      "A counter displaying the number of all processed sessions.",
		},
	)
}

func (e *enderImpl) IncreaseTotalSessions() {
	e.totalSessions.Inc()
}
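A lifecycle sketch for the ender metrics: the active-session gauge moves in both directions while the two counters only grow. The session events marked in the comments are hypothetical triggers.

package main

import (
	enderMetrics "openreplay/backend/pkg/metrics/ender"
)

func main() {
	m := enderMetrics.New("ender")

	m.IncreaseActiveSessions() // a SessionStart was observed
	m.IncreaseTotalSessions()

	m.DecreaseActiveSessions() // a SessionEnd was emitted
	m.IncreaseClosedSessions()
}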
41	ee/backend/pkg/metrics/heuristics/metrics.go	Normal file
@@ -0,0 +1,41 @@
package heuristics

import (
	"github.com/prometheus/client_golang/prometheus"
)

type Heuristics interface {
	IncreaseTotalEvents(eventType string)
	List() []prometheus.Collector
}

type heuristicsImpl struct {
	totalEvents *prometheus.CounterVec
}

func New(serviceName string) Heuristics {
	return &heuristicsImpl{
		totalEvents: newTotalEvents(serviceName),
	}
}

func newTotalEvents(serviceName string) *prometheus.CounterVec {
	return prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "events_total",
			Help:      "A counter displaying the number of all processed events.",
		},
		[]string{"type"},
	)
}

func (h *heuristicsImpl) IncreaseTotalEvents(eventType string) {
	h.totalEvents.WithLabelValues(eventType).Inc()
}

func (h *heuristicsImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		h.totalEvents,
	}
}
192	ee/backend/pkg/metrics/images/metrics.go	Normal file
@@ -0,0 +1,192 @@
package images

import (
	"github.com/prometheus/client_golang/prometheus"

	"openreplay/backend/pkg/metrics/common"
)

type Images interface {
	RecordOriginalArchiveSize(size float64)
	RecordOriginalArchiveExtractionDuration(duration float64)
	IncreaseTotalSavedArchives()
	RecordSavingImageDuration(duration float64)
	IncreaseTotalSavedImages()
	IncreaseTotalCreatedArchives()
	RecordArchivingDuration(duration float64)
	RecordArchiveSize(size float64)
	RecordUploadingDuration(duration float64)
	List() []prometheus.Collector
}

type imagesImpl struct {
	originalArchiveSize               prometheus.Histogram
	originalArchiveExtractionDuration prometheus.Histogram
	totalSavedArchives                prometheus.Counter
	savingImageDuration               prometheus.Histogram
	totalSavedImages                  prometheus.Counter
	totalCreatedArchives              prometheus.Counter
	archivingDuration                 prometheus.Histogram
	archiveSize                       prometheus.Histogram
	uploadingDuration                 prometheus.Histogram
}

func New(serviceName string) Images {
	return &imagesImpl{
		originalArchiveSize:               newOriginalArchiveSize(serviceName),
		originalArchiveExtractionDuration: newOriginalArchiveExtractionDuration(serviceName),
		totalSavedArchives:                newTotalSavedArchives(serviceName),
		savingImageDuration:               newSavingImageDuration(serviceName),
		totalSavedImages:                  newTotalSavedImages(serviceName),
		totalCreatedArchives:              newTotalCreatedArchives(serviceName),
		archivingDuration:                 newArchivingDuration(serviceName),
		archiveSize:                       newArchiveSize(serviceName),
		uploadingDuration:                 newUploadingDuration(serviceName),
	}
}

func (i *imagesImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		i.originalArchiveSize,
		i.originalArchiveExtractionDuration,
		i.totalSavedArchives,
		i.savingImageDuration,
		i.totalSavedImages,
		i.totalCreatedArchives,
		i.archivingDuration,
		i.archiveSize,
		i.uploadingDuration,
	}
}

func newOriginalArchiveSize(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "original_archive_size_bytes",
			Help:      "A histogram displaying the original archive size in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
	)
}

func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {
	i.originalArchiveSize.Observe(size)
}

func newOriginalArchiveExtractionDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "original_archive_extraction_duration_seconds",
			Help:      "A histogram displaying the duration of extracting the original archive in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {
	i.originalArchiveExtractionDuration.Observe(duration)
}

func newTotalSavedArchives(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "total_saved_archives",
			Help:      "A counter displaying the total number of saved original archives.",
		},
	)
}

func (i *imagesImpl) IncreaseTotalSavedArchives() {
	i.totalSavedArchives.Inc()
}

func newSavingImageDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "saving_image_duration_seconds",
			Help:      "A histogram displaying the duration of saving each image in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (i *imagesImpl) RecordSavingImageDuration(duration float64) {
	i.savingImageDuration.Observe(duration)
}

func newTotalSavedImages(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "total_saved_images",
			Help:      "A counter displaying the total number of saved images.",
		},
	)
}

func (i *imagesImpl) IncreaseTotalSavedImages() {
	i.totalSavedImages.Inc()
}

func newTotalCreatedArchives(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "total_created_archives",
			Help:      "A counter displaying the total number of created archives.",
		},
	)
}

func (i *imagesImpl) IncreaseTotalCreatedArchives() {
	i.totalCreatedArchives.Inc()
}

func newArchivingDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "archiving_duration_seconds",
			Help:      "A histogram displaying the duration of archiving each session in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (i *imagesImpl) RecordArchivingDuration(duration float64) {
	i.archivingDuration.Observe(duration)
}

func newArchiveSize(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "archive_size_bytes",
			Help:      "A histogram displaying the session's archive size in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
	)
}

func (i *imagesImpl) RecordArchiveSize(size float64) {
	i.archiveSize.Observe(size)
}

func newUploadingDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "uploading_duration_seconds",
			Help:      "A histogram displaying the duration of uploading each session's archive to S3 in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (i *imagesImpl) RecordUploadingDuration(duration float64) {
	i.uploadingDuration.Observe(duration)
}
38	ee/backend/pkg/metrics/server.go	Normal file
@@ -0,0 +1,38 @@
package metrics

import (
	"context"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"

	"openreplay/backend/pkg/logger"
)

type MetricServer struct {
	registry *prometheus.Registry
}

func New(log logger.Logger, cs []prometheus.Collector) {
	registry := prometheus.NewRegistry()
	// Add go runtime metrics and process collectors.
	registry.MustRegister(
		collectors.NewGoCollector(),
		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
	)
	// Add extra metrics
	registry.MustRegister(cs...)
	// Expose /metrics HTTP endpoint using the created custom registry.
	http.Handle(
		"/metrics", promhttp.HandlerFor(
			registry,
			promhttp.HandlerOpts{
				EnableOpenMetrics: true,
			}),
	)
	go func() {
		log.Error(context.Background(), "%v", http.ListenAndServe(":8888", nil))
	}()
}
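A quick way to verify the exporter wired up by metrics.New above; it assumes a service is running locally with the default :8888 listener shown in server.go.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:8888/metrics")
	if err != nil {
		fmt.Println("metrics endpoint not reachable:", err)
		return
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Printf("%s", body) // Prometheus text format, including the go_* and process_* collectors
}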
259	ee/backend/pkg/metrics/sink/metrics.go	Normal file
@@ -0,0 +1,259 @@
package sink

import (
	"github.com/prometheus/client_golang/prometheus"

	"openreplay/backend/pkg/metrics/common"
)

type Sink interface {
	RecordMessageSize(size float64)
	IncreaseWrittenMessages()
	IncreaseTotalMessages()
	RecordBatchSize(size float64)
	IncreaseTotalBatches()
	RecordWrittenBytes(size float64, fileType string)
	IncreaseTotalWrittenBytes(size float64, fileType string)
	IncreaseCachedAssets()
	DecreaseCachedAssets()
	IncreaseSkippedAssets()
	IncreaseTotalAssets()
	RecordAssetSize(size float64)
	RecordProcessAssetDuration(durMillis float64)
	List() []prometheus.Collector
}

type sinkImpl struct {
	messageSize          prometheus.Histogram
	writtenMessages      prometheus.Counter
	totalMessages        prometheus.Counter
	batchSize            prometheus.Histogram
	totalBatches         prometheus.Counter
	writtenBytes         *prometheus.HistogramVec
	totalWrittenBytes    *prometheus.CounterVec
	cachedAssets         prometheus.Gauge
	skippedAssets        prometheus.Counter
	totalAssets          prometheus.Counter
	assetSize            prometheus.Histogram
	processAssetDuration prometheus.Histogram
}

func New(serviceName string) Sink {
	return &sinkImpl{
		messageSize:          newMessageSize(serviceName),
		writtenMessages:      newWrittenMessages(serviceName),
		totalMessages:        newTotalMessages(serviceName),
		batchSize:            newBatchSize(serviceName),
		totalBatches:         newTotalBatches(serviceName),
		writtenBytes:         newWrittenBytes(serviceName),
		totalWrittenBytes:    newTotalWrittenBytes(serviceName),
		cachedAssets:         newCachedAssets(serviceName),
		skippedAssets:        newSkippedAssets(serviceName),
		totalAssets:          newTotalAssets(serviceName),
		assetSize:            newAssetSize(serviceName),
		processAssetDuration: newProcessAssetDuration(serviceName),
	}
}

func (s *sinkImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		s.messageSize,
		s.writtenMessages,
		s.totalMessages,
		s.batchSize,
		s.totalBatches,
		s.writtenBytes,
		s.totalWrittenBytes,
		s.cachedAssets,
		s.skippedAssets,
		s.totalAssets,
		s.assetSize,
		s.processAssetDuration,
	}
}

func newMessageSize(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "message_size_bytes",
			Help:      "A histogram displaying the size of each message in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
	)
}

func (s *sinkImpl) RecordMessageSize(size float64) {
	s.messageSize.Observe(size)
}

func newWrittenMessages(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "messages_written",
			Help:      "A counter displaying the total number of all written messages.",
		},
	)
}

func (s *sinkImpl) IncreaseWrittenMessages() {
	s.writtenMessages.Inc()
}

func newTotalMessages(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "messages_total",
			Help:      "A counter displaying the total number of all processed messages.",
		},
	)
}

func (s *sinkImpl) IncreaseTotalMessages() {
	s.totalMessages.Inc()
}

func newBatchSize(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "batch_size_bytes",
			Help:      "A histogram displaying the size of each batch in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
	)
}

func (s *sinkImpl) RecordBatchSize(size float64) {
	s.batchSize.Observe(size)
}

func newTotalBatches(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "batches_total",
			Help:      "A counter displaying the total number of all written batches.",
		},
	)
}

func (s *sinkImpl) IncreaseTotalBatches() {
	s.totalBatches.Inc()
}

func newWrittenBytes(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "written_bytes",
			Help:      "A histogram displaying the size of each buffer written to a session file, in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *sinkImpl) RecordWrittenBytes(size float64, fileType string) {
	if size == 0 {
		return
	}
	s.writtenBytes.WithLabelValues(fileType).Observe(size)
	s.IncreaseTotalWrittenBytes(size, fileType)
}

func newTotalWrittenBytes(serviceName string) *prometheus.CounterVec {
	return prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "written_bytes_total",
			Help:      "A counter displaying the total number of bytes written to all session files.",
		},
		[]string{"file_type"},
	)
}

func (s *sinkImpl) IncreaseTotalWrittenBytes(size float64, fileType string) {
	if size == 0 {
		return
	}
	s.totalWrittenBytes.WithLabelValues(fileType).Add(size)
}

func newCachedAssets(serviceName string) prometheus.Gauge {
	return prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: serviceName,
			Name:      "assets_cached",
			Help:      "A gauge displaying the current number of cached assets.",
		},
	)
}

func (s *sinkImpl) IncreaseCachedAssets() {
	s.cachedAssets.Inc()
}

func (s *sinkImpl) DecreaseCachedAssets() {
	s.cachedAssets.Dec()
}

func newSkippedAssets(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "assets_skipped",
			Help:      "A counter displaying the total number of all skipped assets.",
		},
	)
}

func (s *sinkImpl) IncreaseSkippedAssets() {
	s.skippedAssets.Inc()
}

func newTotalAssets(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "assets_total",
			Help:      "A counter displaying the total number of all processed assets.",
		},
	)
}

func (s *sinkImpl) IncreaseTotalAssets() {
	s.totalAssets.Inc()
}

func newAssetSize(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "asset_size_bytes",
			Help:      "A histogram displaying the size of each asset in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
	)
}

func (s *sinkImpl) RecordAssetSize(size float64) {
	s.assetSize.Observe(size)
}

func newProcessAssetDuration(serviceName string) prometheus.Histogram {
	return prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "asset_process_duration_seconds",
			Help:      "A histogram displaying the duration of processing for each asset in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
	)
}

func (s *sinkImpl) RecordProcessAssetDuration(durMillis float64) {
	s.processAssetDuration.Observe(durMillis / 1000.0)
}
219
ee/backend/pkg/metrics/storage/metrics.go
Normal file
219
ee/backend/pkg/metrics/storage/metrics.go
Normal file
|
|
@ -0,0 +1,219 @@
|
|||
package storage

import (
	"github.com/prometheus/client_golang/prometheus"

	"openreplay/backend/pkg/metrics/common"
)

type Storage interface {
	RecordSessionSize(fileSize float64, fileType string)
	IncreaseStorageTotalSessions()
	RecordSkippedSessionSize(fileSize float64, fileType string)
	IncreaseStorageTotalSkippedSessions()
	RecordSessionReadDuration(durMillis float64, fileType string)
	RecordSessionSortDuration(durMillis float64, fileType string)
	RecordSessionEncryptionDuration(durMillis float64, fileType string)
	RecordSessionCompressDuration(durMillis float64, fileType string)
	RecordSessionUploadDuration(durMillis float64, fileType string)
	RecordSessionCompressionRatio(ratio float64, fileType string)
	List() []prometheus.Collector
}

type storageImpl struct {
	sessionSize               *prometheus.HistogramVec
	totalSessions             prometheus.Counter
	skippedSessionSize        *prometheus.HistogramVec
	totalSkippedSessions      prometheus.Counter
	sessionReadDuration       *prometheus.HistogramVec
	sessionSortDuration       *prometheus.HistogramVec
	sessionEncryptionDuration *prometheus.HistogramVec
	sessionCompressDuration   *prometheus.HistogramVec
	sessionUploadDuration     *prometheus.HistogramVec
	sessionCompressionRatio   *prometheus.HistogramVec
}

func New(serviceName string) Storage {
	return &storageImpl{
		sessionSize:               newSessionSize(serviceName),
		totalSessions:             newTotalSessions(serviceName),
		skippedSessionSize:        newSkippedSessionSize(serviceName),
		totalSkippedSessions:      newTotalSkippedSessions(serviceName),
		sessionReadDuration:       newSessionReadDuration(serviceName),
		sessionSortDuration:       newSessionSortDuration(serviceName),
		sessionEncryptionDuration: newSessionEncryptionDuration(serviceName),
		sessionCompressDuration:   newSessionCompressDuration(serviceName),
		sessionUploadDuration:     newSessionUploadDuration(serviceName),
		sessionCompressionRatio:   newSessionCompressionRatio(serviceName),
	}
}

func (s *storageImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		s.sessionSize,
		s.totalSessions,
		s.skippedSessionSize,
		s.totalSkippedSessions,
		s.sessionReadDuration,
		s.sessionSortDuration,
		s.sessionEncryptionDuration,
		s.sessionCompressDuration,
		s.sessionUploadDuration,
		s.sessionCompressionRatio,
	}
}

func newSessionSize(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "session_size_bytes",
			Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
			Buckets:   common.DefaultSizeBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSessionSize(fileSize float64, fileType string) {
	s.sessionSize.WithLabelValues(fileType).Observe(fileSize)
}

func newTotalSessions(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "sessions_total",
			Help:      "A counter displaying the total number of all processed sessions.",
		},
	)
}

func (s *storageImpl) IncreaseStorageTotalSessions() {
	s.totalSessions.Inc()
}

func newSkippedSessionSize(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "skipped_session_size_bytes",
			Help:      "A histogram displaying the size of each skipped session file in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSkippedSessionSize(fileSize float64, fileType string) {
	s.skippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
}

func newTotalSkippedSessions(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "sessions_skipped_total",
			Help:      "A counter displaying the total number of sessions skipped because of size limits.",
		},
	)
}

func (s *storageImpl) IncreaseStorageTotalSkippedSessions() {
	s.totalSkippedSessions.Inc()
}

func newSessionReadDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "read_duration_seconds",
			Help:      "A histogram displaying the duration of reading each session in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSessionReadDuration(durMillis float64, fileType string) {
	// All duration recorders take milliseconds and convert to seconds.
	s.sessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

func newSessionSortDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "sort_duration_seconds",
			Help:      "A histogram displaying the duration of sorting each session in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSessionSortDuration(durMillis float64, fileType string) {
	s.sessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

func newSessionEncryptionDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "encryption_duration_seconds",
			Help:      "A histogram displaying the duration of encrypting each session in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSessionEncryptionDuration(durMillis float64, fileType string) {
	s.sessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

func newSessionCompressDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "compress_duration_seconds",
			Help:      "A histogram displaying the duration of compressing each session in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSessionCompressDuration(durMillis float64, fileType string) {
	s.sessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

func newSessionUploadDuration(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "upload_duration_seconds",
			Help:      "A histogram displaying the duration of uploading each session to S3 in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSessionUploadDuration(durMillis float64, fileType string) {
	s.sessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
}

func newSessionCompressionRatio(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "compression_ratio",
			Help:      "A histogram displaying the compression ratio of mob files for each session.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"file_type"},
	)
}

func (s *storageImpl) RecordSessionCompressionRatio(ratio float64, fileType string) {
	s.sessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
}
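Collectors only export once they are registered. A wiring sketch for the storage service, assuming its main follows the same pattern as the other services in this commit; the variable names and the "dom" file_type label are illustrative:

	// Observability: create and register the storage collectors at start-up.
	storageMetrics := storage.New("storage")
	metrics.New(log, storageMetrics.List())

	// Later, after a session file has been processed:
	storageMetrics.RecordSessionSize(float64(len(mobFile)), "dom")
	storageMetrics.IncreaseStorageTotalSessions()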
ee/backend/pkg/metrics/web/metrics.go (new file, 84 lines)
@@ -0,0 +1,84 @@
package web

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"

	"openreplay/backend/pkg/metrics/common"
)

type Web interface {
	RecordRequestSize(size float64, url string, code int)
	RecordRequestDuration(durMillis float64, url string, code int)
	IncreaseTotalRequests()
	List() []prometheus.Collector
}

type webImpl struct {
	httpRequestSize     *prometheus.HistogramVec
	httpRequestDuration *prometheus.HistogramVec
	httpTotalRequests   prometheus.Counter
}

func New(serviceName string) Web {
	return &webImpl{
		httpRequestSize:     newRequestSizeMetric(serviceName),
		httpRequestDuration: newRequestDurationMetric(serviceName),
		httpTotalRequests:   newTotalRequestsMetric(serviceName),
	}
}

func (w *webImpl) List() []prometheus.Collector {
	return []prometheus.Collector{
		w.httpRequestSize,
		w.httpRequestDuration,
		w.httpTotalRequests,
	}
}

func newRequestSizeMetric(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "web_request_size_bytes",
			Help:      "A histogram displaying the size of each HTTP request in bytes.",
			Buckets:   common.DefaultSizeBuckets,
		},
		[]string{"url", "response_code"},
	)
}

func (w *webImpl) RecordRequestSize(size float64, url string, code int) {
	w.httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
}

func newRequestDurationMetric(serviceName string) *prometheus.HistogramVec {
	return prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: serviceName,
			Name:      "web_request_duration_seconds",
			Help:      "A histogram displaying the duration of each HTTP request in seconds.",
			Buckets:   common.DefaultDurationBuckets,
		},
		[]string{"url", "response_code"},
	)
}

func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {
	w.httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
}

func newTotalRequestsMetric(serviceName string) prometheus.Counter {
	return prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: serviceName,
			Name:      "web_requests_total",
			Help:      "A counter displaying the total number of all HTTP requests.",
		},
	)
}

func (w *webImpl) IncreaseTotalRequests() {
	w.httpTotalRequests.Inc()
}
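A sketch of how an HTTP server might drive these three collectors; the middleware shape and the fixed http.StatusOK code are assumptions (capturing the real status would need a wrapping ResponseWriter), and it assumes net/http and time imports:

	func withMetrics(m Web, next http.Handler) http.Handler {
		return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
			start := time.Now()
			next.ServeHTTP(rw, r)
			// Sizes are in bytes; durations are passed in milliseconds and
			// converted to seconds inside the recorder.
			m.RecordRequestSize(float64(r.ContentLength), r.URL.Path, http.StatusOK)
			m.RecordRequestDuration(float64(time.Since(start).Milliseconds()), r.URL.Path, http.StatusOK)
			m.IncreaseTotalRequests()
		})
	}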
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"net/http"
+	"openreplay/backend/pkg/metrics/database"

 	"openreplay/backend/pkg/db/postgres"
 	db "openreplay/backend/pkg/db/postgres/pool"
@@ -18,13 +19,14 @@ type Tracer interface {
 }

 type tracerImpl struct {
-	log    logger.Logger
-	conn   db.Pool
-	traces postgres.Bulk
-	saver  pool.WorkerPool
+	log     logger.Logger
+	conn    db.Pool
+	traces  postgres.Bulk
+	saver   pool.WorkerPool
+	metrics database.Database
 }

-func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
+func NewTracer(log logger.Logger, conn db.Pool, metrics database.Database) (Tracer, error) {
 	switch {
 	case log == nil:
 		return nil, errors.New("logger is required")
@@ -32,8 +34,9 @@ func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
 		return nil, errors.New("connection is required")
 	}
 	tracer := &tracerImpl{
-		log:  log,
-		conn: conn,
+		log:     log,
+		conn:    conn,
+		metrics: metrics,
 	}
 	if err := tracer.initBulk(); err != nil {
 		return nil, err
@@ -43,7 +46,7 @@ func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
 }

 func (t *tracerImpl) initBulk() (err error) {
-	t.traces, err = postgres.NewBulk(t.conn,
+	t.traces, err = postgres.NewBulk(t.conn, t.metrics,
 		"traces",
 		"(user_id, tenant_id, auth, action, method, path_format, endpoint, payload, parameters, status)",
 		"($%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d)",
@@ -10,8 +10,15 @@ import (
 	"openreplay/backend/pkg/metrics/database"
 )

+var ErrDisabledCache = errors.New("cache is disabled")
+
 type cacheImpl struct {
-	db *redis.Client
+	db      *redis.Client
+	metrics database.Database
 }

+func NewCache(db *redis.Client, metrics database.Database) Cache {
+	return &cacheImpl{db: db, metrics: metrics}
+}
+
 func (c *cacheImpl) SetCache(sessID uint64, data map[string]string) error {
@@ -32,8 +39,8 @@ func (c *cacheImpl) SetCache(sessID uint64, data map[string]string) error {
 	if _, err = c.db.Redis.Set(fmt.Sprintf("session:cache:id:%d", sessID), sessionBytes, time.Minute*120).Result(); err != nil {
 		return err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "setCache", "session")
-	database.IncreaseRedisRequests("setCache", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "setCache", "session")
+	c.metrics.IncreaseRedisRequests("setCache", "sessions")
 	return nil
 }
@@ -53,8 +60,8 @@ func (c *cacheImpl) GetCache(sessID uint64) (map[string]string, error) {
 	if err = json.Unmarshal([]byte(result), &session); err != nil {
 		return nil, err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "getCache", "session")
-	database.IncreaseRedisRequests("getCache", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "getCache", "session")
+	c.metrics.IncreaseRedisRequests("getCache", "sessions")
 	return session, nil
 }
@@ -76,8 +83,8 @@ func (c *cacheImpl) Set(session *Session) error {
 	if _, err = c.db.Redis.Set(fmt.Sprintf("session:id:%d", session.SessionID), sessionBytes, time.Minute*60).Result(); err != nil {
 		return err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "session")
-	database.IncreaseRedisRequests("set", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "session")
+	c.metrics.IncreaseRedisRequests("set", "sessions")
 	return nil
 }
@@ -97,13 +104,7 @@ func (c *cacheImpl) Get(sessionID uint64) (*Session, error) {
 	if err = json.Unmarshal([]byte(result), session); err != nil {
 		return nil, err
 	}
-	database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "session")
-	database.IncreaseRedisRequests("get", "sessions")
+	c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "session")
+	c.metrics.IncreaseRedisRequests("get", "sessions")
 	return session, nil
 }
-
-var ErrDisabledCache = errors.New("cache is disabled")
-
-func NewCache(db *redis.Client) Cache {
-	return &cacheImpl{db: db}
-}
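The cache follows the same inversion: package-level metric calls become method calls on a database.Database supplied at construction time. A construction sketch under the same assumptions; redisClient and the "sessions" service name are illustrative:

	// Observability: the session cache reports its Redis timings
	// through the injected metrics instance.
	dbMetrics := database.New("sessions")
	cache := NewCache(redisClient, dbMetrics)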