refactor(backend/storage): pass FileSplitSize as env var

This commit is contained in:
Alex Kaminskii 2022-05-16 15:31:37 +02:00
parent 7d7dcc2910
commit 76d9d41ed8
4 changed files with 22 additions and 20 deletions

View file

@@ -47,6 +47,7 @@ ENV TZ=UTC \
CACHE_ASSETS=true \
ASSETS_SIZE_LIMIT=6291456 \
FS_CLEAN_HRS=72 \
FILE_SPLIT_SIZE=300000 \
LOG_QUEUE_STATS_INTERVAL_SEC=60

View file

@@ -46,6 +46,7 @@ ENV TZ=UTC \
CACHE_ASSETS=true \
ASSETS_SIZE_LIMIT=6291456 \
FS_CLEAN_HRS=12 \
FILE_SPLIT_SIZE=300000 \
LOG_QUEUE_STATS_INTERVAL_SEC=60
RUN mkdir $FS_DIR

View file

@@ -6,27 +6,27 @@ import (
)
type Config struct {
S3Region string
S3Bucket string
FSDir string
FSCleanHRS int
SessionFileSplitSize int
RetryTimeout time.Duration
GroupStorage string
TopicTrigger string
DeleteTimeout time.Duration
S3Region string
S3Bucket string
FSDir string
FSCleanHRS int
FileSplitSize int
RetryTimeout time.Duration
GroupStorage string
TopicTrigger string
DeleteTimeout time.Duration
}
func New() *Config {
return &Config{
S3Region: env.String("AWS_REGION_WEB"),
S3Bucket: env.String("S3_BUCKET_WEB"),
FSDir: env.String("FS_DIR"),
FSCleanHRS: env.Int("FS_CLEAN_HRS"),
SessionFileSplitSize: 200000, // ~200 kB
RetryTimeout: 2 * time.Minute,
GroupStorage: env.String("GROUP_STORAGE"),
TopicTrigger: env.String("TOPIC_TRIGGER"),
DeleteTimeout: 48 * time.Hour,
S3Region: env.String("AWS_REGION_WEB"),
S3Bucket: env.String("S3_BUCKET_WEB"),
FSDir: env.String("FS_DIR"),
FSCleanHRS: env.Int("FS_CLEAN_HRS"),
FileSplitSize: env.Int("FILE_SPLIT_SIZE"),
RetryTimeout: 2 * time.Minute,
GroupStorage: env.String("GROUP_STORAGE"),
TopicTrigger: env.String("TOPIC_TRIGGER"),
DeleteTimeout: 48 * time.Hour,
}
}

View file

@@ -40,7 +40,7 @@ func (s *Storage) UploadKey(key string, retryCount int) {
}
defer file.Close()
startBytes := make([]byte, s.cfg.SessionFileSplitSize)
startBytes := make([]byte, s.cfg.FileSplitSize)
nRead, err := file.Read(startBytes)
if err != nil {
log.Printf("File read error: %f", err)
@@ -50,7 +50,7 @@ func (s *Storage) UploadKey(key string, retryCount int) {
if err := s.s3.Upload(s.gzipFile(startReader), key, "application/octet-stream", true); err != nil {
log.Fatalf("Storage: start upload failed. %v\n", err)
}
if nRead == s.cfg.SessionFileSplitSize {
if nRead == s.cfg.FileSplitSize {
if err := s.s3.Upload(s.gzipFile(file), key+"e", "application/octet-stream", true); err != nil {
log.Fatalf("Storage: end upload failed. %v\n", err)
}