feat(backend): added env variable for enabling pprof (#880)

This commit is contained in:
Alexander 2022-12-13 17:48:00 +01:00 committed by GitHub
parent 19b6c896b9
commit 09adccf4be
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 42 additions and 8 deletions

View file

@@ -3,6 +3,7 @@ package main
import ( import (
"context" "context"
"log" "log"
"openreplay/backend/pkg/pprof"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@@ -18,10 +19,12 @@ import (
func main() { func main() {
metrics := monitoring.New("assets") metrics := monitoring.New("assets")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := config.New() cfg := config.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
cacher := cacher.NewCacher(cfg, metrics) cacher := cacher.NewCacher(cfg, metrics)

View file

@@ -4,6 +4,7 @@ import (
"errors" "errors"
"log" "log"
types2 "openreplay/backend/pkg/db/types" types2 "openreplay/backend/pkg/db/types"
"openreplay/backend/pkg/pprof"
"openreplay/backend/pkg/queue/types" "openreplay/backend/pkg/queue/types"
"os" "os"
"os/signal" "os/signal"
@@ -25,10 +26,12 @@ import (
func main() { func main() {
metrics := monitoring.New("db") metrics := monitoring.New("db")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := db.New() cfg := db.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
// Init database // Init database
pg := cache.NewPGCache( pg := cache.NewPGCache(

View file

@@ -3,6 +3,7 @@ package main
import ( import (
"log" "log"
"openreplay/backend/internal/storage" "openreplay/backend/internal/storage"
"openreplay/backend/pkg/pprof"
"os" "os"
"os/signal" "os/signal"
"strings" "strings"
@@ -21,9 +22,13 @@ import (
) )
func main() { func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
metrics := monitoring.New("ender") metrics := monitoring.New("ender")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := ender.New() cfg := ender.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), cfg.ProjectExpirationTimeoutMs) pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
defer pg.Close() defer pg.Close()

View file

@@ -2,6 +2,7 @@ package main
import ( import (
"log" "log"
"openreplay/backend/pkg/pprof"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@@ -20,8 +21,10 @@ import (
func main() { func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
// Load service configuration
cfg := heuristics.New() cfg := heuristics.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message. // HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
handlersFabric := func() []handlers.MessageProcessor { handlersFabric := func() []handlers.MessageProcessor {

View file

@@ -7,6 +7,7 @@ import (
"openreplay/backend/internal/http/server" "openreplay/backend/internal/http/server"
"openreplay/backend/internal/http/services" "openreplay/backend/internal/http/services"
"openreplay/backend/pkg/monitoring" "openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/pprof"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@@ -18,10 +19,12 @@ import (
func main() { func main() {
metrics := monitoring.New("http") metrics := monitoring.New("http")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := http.New() cfg := http.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
// Connect to queue // Connect to queue
producer := queue.NewProducer(cfg.MessageSizeLimit, true) producer := queue.NewProducer(cfg.MessageSizeLimit, true)

View file

@@ -5,6 +5,7 @@ import (
config "openreplay/backend/internal/config/integrations" config "openreplay/backend/internal/config/integrations"
"openreplay/backend/internal/integrations/clientManager" "openreplay/backend/internal/integrations/clientManager"
"openreplay/backend/pkg/monitoring" "openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/pprof"
"time" "time"
"os" "os"
@@ -19,10 +20,12 @@ import (
func main() { func main() {
metrics := monitoring.New("integrations") metrics := monitoring.New("integrations")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := config.New() cfg := config.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
pg := postgres.NewConn(cfg.PostgresURI, 0, 0, metrics) pg := postgres.NewConn(cfg.PostgresURI, 0, 0, metrics)
defer pg.Close() defer pg.Close()

View file

@@ -3,6 +3,7 @@ package main
import ( import (
"context" "context"
"log" "log"
"openreplay/backend/pkg/pprof"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@@ -20,10 +21,12 @@ import (
func main() { func main() {
metrics := monitoring.New("sink") metrics := monitoring.New("sink")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := sink.New() cfg := sink.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) { if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err) log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)

View file

@@ -2,6 +2,7 @@ package main
import ( import (
"log" "log"
"openreplay/backend/pkg/pprof"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@@ -18,10 +19,12 @@ import (
func main() { func main() {
metrics := monitoring.New("storage") metrics := monitoring.New("storage")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := config.New() cfg := config.New()
if cfg.UseProfiler {
pprof.StartProfilingServer()
}
s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket) s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket)
srv, err := storage.New(cfg, s3, metrics) srv, err := storage.New(cfg, s3, metrics)

View file

@@ -14,6 +14,7 @@ type Config struct {
AssetsOrigin string `env:"ASSETS_ORIGIN,required"` AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
AssetsSizeLimit int `env:"ASSETS_SIZE_LIMIT,required"` AssetsSizeLimit int `env:"ASSETS_SIZE_LIMIT,required"`
AssetsRequestHeaders map[string]string `env:"ASSETS_REQUEST_HEADERS"` AssetsRequestHeaders map[string]string `env:"ASSETS_REQUEST_HEADERS"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }
func New() *Config { func New() *Config {

View file

@@ -18,6 +18,7 @@ type Config struct {
BatchQueueLimit int `env:"DB_BATCH_QUEUE_LIMIT,required"` BatchQueueLimit int `env:"DB_BATCH_QUEUE_LIMIT,required"`
BatchSizeLimit int `env:"DB_BATCH_SIZE_LIMIT,required"` BatchSizeLimit int `env:"DB_BATCH_SIZE_LIMIT,required"`
UseQuickwit bool `env:"QUICKWIT_ENABLED,default=false"` UseQuickwit bool `env:"QUICKWIT_ENABLED,default=false"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }
func New() *Config { func New() *Config {

View file

@@ -15,6 +15,7 @@ type Config struct {
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"` ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
PartitionsNumber int `env:"PARTITIONS_NUMBER,required"` PartitionsNumber int `env:"PARTITIONS_NUMBER,required"`
UseEncryption bool `env:"USE_ENCRYPTION,default=false"` UseEncryption bool `env:"USE_ENCRYPTION,default=false"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }
func New() *Config { func New() *Config {

View file

@@ -13,6 +13,7 @@ type Config struct {
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"` TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"` TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"` ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }
func New() *Config { func New() *Config {

View file

@@ -23,6 +23,7 @@ type Config struct {
TokenSecret string `env:"TOKEN_SECRET,required"` TokenSecret string `env:"TOKEN_SECRET,required"`
UAParserFile string `env:"UAPARSER_FILE,required"` UAParserFile string `env:"UAPARSER_FILE,required"`
MaxMinDBFile string `env:"MAXMINDDB_FILE,required"` MaxMinDBFile string `env:"MAXMINDDB_FILE,required"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
WorkerID uint16 WorkerID uint16
} }

View file

@@ -10,6 +10,7 @@ type Config struct {
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"` TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
PostgresURI string `env:"POSTGRES_STRING,required"` PostgresURI string `env:"POSTGRES_STRING,required"`
TokenSecret string `env:"TOKEN_SECRET,required"` TokenSecret string `env:"TOKEN_SECRET,required"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }
func New() *Config { func New() *Config {

View file

@@ -21,6 +21,7 @@ type Config struct {
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"` ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
CacheThreshold int64 `env:"CACHE_THRESHOLD,default=5"` CacheThreshold int64 `env:"CACHE_THRESHOLD,default=5"`
CacheExpiration int64 `env:"CACHE_EXPIRATION,default=120"` CacheExpiration int64 `env:"CACHE_EXPIRATION,default=120"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }
func New() *Config { func New() *Config {

View file

@@ -21,6 +21,7 @@ type Config struct {
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"` ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
UseFailover bool `env:"USE_FAILOVER,default=false"` UseFailover bool `env:"USE_FAILOVER,default=false"`
MaxFileSize int64 `env:"MAX_FILE_SIZE,default=524288000"` MaxFileSize int64 `env:"MAX_FILE_SIZE,default=524288000"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }
func New() *Config { func New() *Config {