New env variable CLOUD (aws by default) (#1232)

* feat(backend): added new env variable CLOUD (aws by default)

* chore(backend): Adding env variable for CLOUD

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
This commit is contained in:
Alexander 2023-05-04 16:48:48 +02:00 committed by GitHub
parent 6291749bb7
commit 742c038ecd
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 26 additions and 6 deletions

View file

@ -85,7 +85,9 @@ ENV TZ=UTC \
CH_DATABASE="default" \
# Max file size to process, default to 100MB
MAX_FILE_SIZE=100000000 \
USE_ENCRYPTION=false
USE_ENCRYPTION=false \
# Used to enable cloud-specific features
CLOUD="aws"
RUN if [ "$SERVICE_NAME" = "http" ]; then \

View file

@ -25,7 +25,7 @@ func main() {
cfg := config.New()
s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket)
s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket, cfg.UseFileTags())
srv, err := storage.New(cfg, s3)
if err != nil {
log.Printf("can't init storage service: %s", err)

View file

@ -71,7 +71,7 @@ func NewCacher(cfg *config.Config) *cacher {
c := &cacher{
timeoutMap: newTimeoutMap(),
s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets),
s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets, cfg.UseFileTags()),
httpClient: &http.Client{
Timeout: time.Duration(6) * time.Second,
Transport: &http.Transport{

View file

@ -2,11 +2,14 @@ package common
import "strings"
// Config is the common configuration shared by all backend services.
// Values are populated from environment variables via the `env` struct tags.
type Config struct {
	// Optional path to a config file; empty means env-only configuration.
	ConfigFilePath string `env:"CONFIG_FILE_PATH"`
	// Maximum queue message size in bytes (default 1 MiB).
	MessageSizeLimit int `env:"QUEUE_MESSAGE_SIZE_LIMIT,default=1048576"`
	// Memory usage threshold as a percentage (default 80).
	MaxMemoryUsage uint64 `env:"MAX_MEMORY_USAGE,default=80"`
	MemoryLimitMB  uint64 `env:"MEMORY_LIMIT_MB,default=0"` // 0 means take limit from OS (cgroup)
	// Target cloud provider; used to toggle cloud-specific features
	// such as S3 object tagging (see UseFileTags).
	CloudName string `env:"CLOUD,default=aws"`
}
type Configer interface {
@ -17,6 +20,12 @@ func (c *Config) GetConfigPath() string {
return c.ConfigFilePath
}
// UseFileTags reports whether uploaded storage objects should carry an
// object tag. Tagging is disabled when CLOUD is set to "azure", since
// that backend does not support S3-style object tagging.
// The comparison is case-insensitive so values like "Azure" or "AZURE"
// behave the same as "azure".
func (c *Config) UseFileTags() bool {
	return !strings.EqualFold(c.CloudName, "azure")
}
// Postgres config
type Postgres struct {
Postgres string `env:"POSTGRES_STRING,required"`
ApplicationName string `env:"SERVICE_NAME,default='worker'"`

View file

@ -25,7 +25,7 @@ func New(cfg *http.Config, producer types.Producer, pgconn *cache.PGCache) *Serv
return &ServicesBuilder{
Database: pgconn,
Producer: producer,
Storage: storage.NewS3(cfg.AWSRegion, cfg.S3BucketIOSImages),
Storage: storage.NewS3(cfg.AWSRegion, cfg.S3BucketIOSImages, cfg.UseFileTags()),
Tokenizer: token.NewTokenizer(cfg.TokenSecret),
UaParser: uaparser.NewUAParser(cfg.UAParserFile),
GeoIP: geoip.NewGeoIP(cfg.MaxMinDBFile),

View file

@ -19,15 +19,17 @@ type S3 struct {
svc *_s3.S3
bucket *string
fileTag string
useTags bool
}
func NewS3(region string, bucket string) *S3 {
// NewS3 creates an S3 client bound to the given region and bucket.
// useTags controls whether uploads are annotated with an object tag
// (callers pass cfg.UseFileTags(), which disables tagging on clouds
// without S3-style tagging support).
func NewS3(region string, bucket string, useTags bool) *S3 {
	sess := env.AWSSessionOnRegion(region)
	return &S3{
		uploader: s3manager.NewUploader(sess),
		svc:      _s3.New(sess), // AWS Docs: "These clients are safe to use concurrently."
		bucket:   &bucket,
		fileTag:  loadFileTag(), // tag value resolved once at construction
		useTags:  useTags,
	}
}
@ -39,6 +41,13 @@ const (
Brotli
)
// tagging returns the tag set to attach to uploaded objects, or nil
// when object tagging is disabled for the configured cloud.
func (s3 *S3) tagging() *string {
	if !s3.useTags {
		return nil
	}
	return &s3.fileTag
}
func (s3 *S3) Upload(reader io.Reader, key string, contentType string, compression CompressionType) error {
cacheControl := "max-age=2628000, immutable, private"
var contentEncoding *string
@ -58,7 +67,7 @@ func (s3 *S3) Upload(reader io.Reader, key string, contentType string, compressi
ContentType: &contentType,
CacheControl: &cacheControl,
ContentEncoding: contentEncoding,
Tagging: &s3.fileTag,
Tagging: s3.tagging(),
})
return err
}