feat (backend-assets): ASSETS_SIZE_LIMIT env var
parent d8db58b5f6
commit 769caeee2c
4 changed files with 13 additions and 6 deletions
@@ -39,6 +39,7 @@ ENV TZ=UTC \
     AWS_REGION_WEB=eu-central-1 \
     AWS_REGION_IOS=eu-west-1 \
     AWS_REGION_ASSETS=eu-central-1 \
+    ASSETS_SIZE_LIMIT=6291456


 ARG SERVICE_NAME
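Note: the default keeps the existing limit. 6291456 bytes is exactly the 6 MiB that the BODY_LIMIT constant removed later in this commit hard-coded as 6 * (1 << 20). A one-line Go check of that equivalence (illustration only, not part of the commit):

package main

import "fmt"

func main() {
    // Removed constant: BODY_LIMIT = 6 * (1 << 20); new Dockerfile default: 6291456.
    fmt.Println(6*(1<<20) == 6291456) // true
}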
backend/pkg/env/vars.go (vendored): 4 changes
@@ -36,6 +36,10 @@ func Uint64(key string) uint64 {
 	return n
 }
 
+func Int(key string) int {
+	return int(Uint64(key))
+}
+
 func Bool(key string) bool {
 	v := String(key)
 	if v != "true" && v != "false" {
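Note: the new env.Int helper simply narrows the package's existing Uint64 reader to int. A self-contained sketch of the intended behaviour; the strconv-based parsing below is an assumption standing in for env.Uint64/env.String, not code from this repository:

package main

import (
    "fmt"
    "os"
    "strconv"
)

// uint64FromEnv stands in for env.Uint64: read the variable and parse it as
// an unsigned decimal integer (error handling elided in this sketch).
func uint64FromEnv(key string) uint64 {
    n, _ := strconv.ParseUint(os.Getenv(key), 10, 64)
    return n
}

// intFromEnv mirrors the new env.Int: parse as uint64, then narrow to int.
func intFromEnv(key string) int {
    return int(uint64FromEnv(key))
}

func main() {
    os.Setenv("ASSETS_SIZE_LIMIT", "6291456")
    fmt.Println(intFromEnv("ASSETS_SIZE_LIMIT")) // prints 6291456
}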
@@ -17,7 +17,6 @@ import (
 	"openreplay/backend/pkg/storage"
 )
 
-const BODY_LIMIT = 6 * (1 << 20) // 6 Mb
 const MAX_CACHE_DEPTH = 5
 
 type cacher struct {
@@ -26,9 +25,10 @@ type cacher struct {
 	httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
 	rewriter   *assets.Rewriter // Read only
 	Errors     chan error
+	sizeLimit  int
 }
 
-func NewCacher(region string, bucket string, origin string) *cacher {
+func NewCacher(region string, bucket string, origin string, sizeLimit int) *cacher {
 	rewriter := assets.NewRewriter(origin)
 	return &cacher{
 		timeoutMap: newTimeoutMap(),
@@ -39,8 +39,9 @@ func NewCacher(region string, bucket string, origin string) *cacher {
 				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
 			},
 		},
-		rewriter: rewriter,
-		Errors:   make(chan error),
+		rewriter:  rewriter,
+		Errors:    make(chan error),
+		sizeLimit: sizeLimit,
 	}
 }
 
@@ -72,12 +73,12 @@ func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, conte
 		c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), context)
 		return
 	}
-	data, err := ioutil.ReadAll(io.LimitReader(res.Body, BODY_LIMIT+1))
+	data, err := ioutil.ReadAll(io.LimitReader(res.Body, int64(c.sizeLimit+1)))
 	if err != nil {
 		c.Errors <- errors.Wrap(err, context)
 		return
 	}
-	if len(data) > BODY_LIMIT {
+	if len(data) > c.sizeLimit {
 		c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), context)
 		return
 	}
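Note: the size check itself is unchanged, only its bound becomes configurable: read at most limit+1 bytes, then treat anything longer than limit as oversized (the extra byte is what distinguishes "exactly at the limit" from "over it"). A standalone sketch of the same pattern, using io.ReadAll in place of the deprecated ioutil.ReadAll and assuming nothing from this repository:

package main

import (
    "errors"
    "fmt"
    "io"
    "strings"
)

// readLimited reads at most limit bytes from r and reports an error when the
// source holds more: LimitReader is given limit+1 bytes so that a read which
// comes back longer than limit proves the body was oversized, while a body of
// exactly limit bytes still passes.
func readLimited(r io.Reader, limit int) ([]byte, error) {
    data, err := io.ReadAll(io.LimitReader(r, int64(limit)+1))
    if err != nil {
        return nil, err
    }
    if len(data) > limit {
        return nil, errors.New("maximum size exceeded")
    }
    return data, nil
}

func main() {
    if _, err := readLimited(strings.NewReader("0123456789"), 8); err != nil {
        fmt.Println(err) // maximum size exceeded
    }
    data, err := readLimited(strings.NewReader("0123456789"), 16)
    fmt.Println(string(data), err) // 0123456789 <nil>
}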
@@ -26,6 +26,7 @@ func main() {
 		env.String("AWS_REGION"),
 		env.String("S3_BUCKET_ASSETS"),
 		env.String("ASSETS_ORIGIN"),
+		env.Int("ASSETS_SIZE_LIMIT"),
 	)
 
 	consumer := queue.NewMessageConsumer(