V1.8.2 workers app name (#772)
* feat(backend): added application name to postgres connection Co-authored-by: Alexander Zavorotynskiy <zavorotynskiy@pm.me>
This commit is contained in:
parent
d76caf7e4e
commit
767d2f6f0c
10 changed files with 31 additions and 10 deletions
|
|
@ -25,6 +25,7 @@ LABEL GIT_SHA=$GIT_SHA
|
|||
RUN apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5
|
||||
RUN adduser -u 1001 openreplay -D
|
||||
|
||||
ARG SERVICE_NAME
|
||||
ENV TZ=UTC \
|
||||
GIT_SHA=$GIT_SHA \
|
||||
FS_ULIMIT=10000 \
|
||||
|
|
@ -76,11 +77,11 @@ ENV TZ=UTC \
|
|||
USE_FAILOVER=false \
|
||||
GROUP_STORAGE_FAILOVER=failover \
|
||||
TOPIC_STORAGE_FAILOVER=storage-failover \
|
||||
SERVICE_NAME=$SERVICE_NAME \
|
||||
PROFILER_ENABLED=false \
|
||||
COMPRESSION_TYPE=zstd
|
||||
|
||||
|
||||
ARG SERVICE_NAME
|
||||
RUN if [ "$SERVICE_NAME" = "http" ]; then \
|
||||
wget https://raw.githubusercontent.com/ua-parser/uap-core/master/regexes.yaml -O "$UAPARSER_FILE" &&\
|
||||
wget https://static.openreplay.com/geoip/GeoLite2-Country.mmdb -O "$MAXMINDDB_FILE"; fi
|
||||
|
|
|
|||
|
|
@ -33,7 +33,7 @@ func main() {
|
|||
|
||||
// Init database
|
||||
pg := cache.NewPGCache(
|
||||
postgres.NewConn(cfg.Postgres, cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
|
||||
postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
|
||||
defer pg.Close()
|
||||
|
||||
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
|
||||
|
|
|
|||
|
|
@ -29,7 +29,7 @@ func main() {
|
|||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
|
||||
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
|
||||
defer pg.Close()
|
||||
|
||||
sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
|
||||
|
|
|
|||
|
|
@ -31,7 +31,7 @@ func main() {
|
|||
defer producer.Close(15000)
|
||||
|
||||
// Connect to database
|
||||
dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), 1000*60*20)
|
||||
dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), 1000*60*20)
|
||||
defer dbConn.Close()
|
||||
|
||||
// Build all services
|
||||
|
|
|
|||
|
|
@ -27,7 +27,7 @@ func main() {
|
|||
pprof.StartProfilingServer()
|
||||
}
|
||||
|
||||
pg := postgres.NewConn(cfg.PostgresURI, 0, 0, metrics)
|
||||
pg := postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics)
|
||||
defer pg.Close()
|
||||
|
||||
tokenizer := token.NewTokenizer(cfg.TokenSecret)
|
||||
|
|
@ -50,7 +50,7 @@ func main() {
|
|||
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
|
||||
defer producer.Close(15000)
|
||||
|
||||
listener, err := postgres.NewIntegrationsListener(cfg.PostgresURI)
|
||||
listener, err := postgres.NewIntegrationsListener(cfg.Postgres.String())
|
||||
if err != nil {
|
||||
log.Printf("Postgres listener error: %v\n", err)
|
||||
log.Fatalf("Postgres listener error")
|
||||
|
|
|
|||
|
|
@ -1,5 +1,7 @@
|
|||
package common
|
||||
|
||||
import "strings"
|
||||
|
||||
type Config struct {
|
||||
ConfigFilePath string `env:"CONFIG_FILE_PATH"`
|
||||
MessageSizeLimit int `env:"QUEUE_MESSAGE_SIZE_LIMIT,default=1048576"`
|
||||
|
|
@ -12,3 +14,21 @@ type Configer interface {
|
|||
func (c *Config) GetConfigPath() string {
|
||||
return c.ConfigFilePath
|
||||
}
|
||||
|
||||
// Postgres holds the database connection settings shared by every worker,
// embedded into each service's Config.
type Postgres struct {
	// Postgres is the raw connection string: either a URL
	// ("postgres://user:pass@host/db") or a libpq keyword/value DSN
	// ("host=... dbname=...").
	Postgres string `env:"POSTGRES_STRING,required"`
	// ApplicationName is reported to the server as application_name so
	// each worker's connections can be told apart (e.g. in
	// pg_stat_activity). NOTE: the default must be unquoted — env tags
	// are taken literally, so default='worker' would produce the string
	// 'worker', quotes included.
	ApplicationName string `env:"SERVICE_NAME,default=worker"`
}

// String returns the connection string with application_name appended,
// unless the caller already set one explicitly. The parameter is added
// in the syntax matching the DSN style: a query parameter for URLs, a
// space-separated key=value pair for keyword/value DSNs.
func (cfg *Postgres) String() string {
	str := cfg.Postgres
	// Respect an application_name the operator already put in the DSN.
	if strings.Contains(str, "application_name") {
		return str
	}
	if strings.Contains(str, "://") {
		// URL-style DSN: append as a query parameter.
		if strings.Contains(str, "?") {
			str += "&"
		} else {
			str += "?"
		}
		str += "application_name=" + cfg.ApplicationName
	} else {
		// Keyword/value DSN: libpq expects space-separated pairs,
		// not URL query syntax.
		str += " application_name=" + cfg.ApplicationName
	}
	return str
}
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@ import (
|
|||
|
||||
type Config struct {
|
||||
common.Config
|
||||
Postgres string `env:"POSTGRES_STRING,required"`
|
||||
common.Postgres
|
||||
ProjectExpirationTimeoutMs int64 `env:"PROJECT_EXPIRATION_TIMEOUT_MS,default=1200000"`
|
||||
LoggerTimeout int `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
|
||||
GroupDB string `env:"GROUP_DB,required"`
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ import (
|
|||
|
||||
type Config struct {
|
||||
common.Config
|
||||
Postgres string `env:"POSTGRES_STRING,required"`
|
||||
common.Postgres
|
||||
ProjectExpirationTimeoutMs int64 `env:"PROJECT_EXPIRATION_TIMEOUT_MS,default=1200000"`
|
||||
GroupEnder string `env:"GROUP_ENDER,required"`
|
||||
LoggerTimeout int `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
|
||||
|
|
|
|||
|
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
type Config struct {
|
||||
common.Config
|
||||
common.Postgres
|
||||
HTTPHost string `env:"HTTP_HOST,default="`
|
||||
HTTPPort string `env:"HTTP_PORT,required"`
|
||||
HTTPTimeout time.Duration `env:"HTTP_TIMEOUT,default=60s"`
|
||||
|
|
@ -19,7 +20,6 @@ type Config struct {
|
|||
FileSizeLimit int64 `env:"FILE_SIZE_LIMIT,default=10000000"`
|
||||
AWSRegion string `env:"AWS_REGION,required"`
|
||||
S3BucketIOSImages string `env:"S3_BUCKET_IOS_IMAGES,required"`
|
||||
Postgres string `env:"POSTGRES_STRING,required"`
|
||||
TokenSecret string `env:"TOKEN_SECRET,required"`
|
||||
UAParserFile string `env:"UAPARSER_FILE,required"`
|
||||
MaxMinDBFile string `env:"MAXMINDDB_FILE,required"`
|
||||
|
|
|
|||
|
|
@ -7,8 +7,8 @@ import (
|
|||
|
||||
type Config struct {
|
||||
common.Config
|
||||
common.Postgres
|
||||
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
|
||||
PostgresURI string `env:"POSTGRES_STRING,required"`
|
||||
TokenSecret string `env:"TOKEN_SECRET,required"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue