feat(assets): use content encoding of the original file (#2970)

Author: Alexander, 2025-01-24 14:35:11 +01:00, committed by GitHub
parent 1a4893bde8
commit 76b0ae7b0d
Signature: no known key found for this signature in database (GPG key ID: B5690EEEBB952194)
5 changed files with 22 additions and 15 deletions


@@ -143,6 +143,7 @@ func (c *cacher) cacheURL(t *Task) {
 	if contentType == "" {
 		contentType = mime.TypeByExtension(filepath.Ext(res.Request.URL.Path))
 	}
+	contentEncoding := res.Header.Get("Content-Encoding")

 	// Skip html file (usually it's a CDN mock for 404 error)
 	if strings.HasPrefix(contentType, "text/html") {
@@ -159,7 +160,7 @@ func (c *cacher) cacheURL(t *Task) {
 	// TODO: implement in streams
 	start = time.Now()
-	err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, objectstorage.NoCompression)
+	err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression)
 	if err != nil {
 		metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
 		c.Errors <- errors.Wrap(err, t.urlContext)


@@ -385,7 +385,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 	metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
 	// Upload session to s3
 	start := time.Now()
-	if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", task.compression); err != nil {
+	if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", "", task.compression); err != nil {
 		s.log.Fatal(task.ctx, "failed to upload mob file, err: %s", err)
 	}
 	uploadDoms = time.Now().Sub(start).Milliseconds()
@@ -398,7 +398,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 	metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
 	// Upload session to s3
 	start := time.Now()
-	if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", task.compression); err != nil {
+	if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", "", task.compression); err != nil {
 		s.log.Fatal(task.ctx, "failed to upload mob file, err: %s", err)
 	}
 	uploadDome = time.Now().Sub(start).Milliseconds()
@@ -411,7 +411,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 	metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
 	// Upload session to s3
 	start := time.Now()
-	if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", task.compression); err != nil {
+	if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", "", task.compression); err != nil {
 		s.log.Fatal(task.ctx, "failed to upload mob file, err: %s", err)
 	}
 	uploadDev = time.Now().Sub(start).Milliseconds()
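uploadSession keeps its previous behaviour: mob files are compressed by the service itself, so the new contentEncoding argument is passed as an empty string and the backends keep deriving the header from task.compression. A hedged sketch of the two call shapes that now exist; the function, reader and key names are invented, and the objectstorage import path is assumed.

package sketch

import (
	"io"

	"openreplay/backend/pkg/objectstorage" // import path assumed
)

// uploadExamples is purely illustrative; objStorage, mobReader, assetReader,
// sessionKey and assetKey are placeholders, not names from the diff.
func uploadExamples(objStorage objectstorage.ObjectStorage, mobReader, assetReader io.Reader, sessionKey, assetKey string) error {
	// Session recordings are compressed by the service itself, so the new
	// contentEncoding argument stays empty and the backend derives the header
	// from the CompressionType.
	if err := objStorage.Upload(mobReader, sessionKey, "application/octet-stream", "", objectstorage.Gzip); err != nil {
		return err
	}
	// Cached assets can arrive already encoded by the origin; the original
	// Content-Encoding is forwarded and NoCompression tells the backend not
	// to derive one on its own.
	return objStorage.Upload(assetReader, assetKey, "text/css", "br", objectstorage.NoCompression)
}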


@@ -15,7 +15,7 @@ const (
 )

 type ObjectStorage interface {
-	Upload(reader io.Reader, key string, contentType string, compression CompressionType) error
+	Upload(reader io.Reader, key string, contentType, contentEncoding string, compression CompressionType) error
 	Get(key string) (io.ReadCloser, error)
 	Exists(key string) bool
 	GetCreationTime(key string) *time.Time
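Since the interface gained a parameter, every implementation (and any test double) has to be updated. A minimal in-memory fake written against the new signature, assuming it lives in the same package as the interface; only Upload is shown, the remaining methods would still need stubs.

package objectstorage

import "io"

// memoryStorage is a hypothetical in-memory fake for tests; it is not part of
// this commit.
type memoryStorage struct {
	objects map[string]storedObject
}

type storedObject struct {
	data            []byte
	contentType     string
	contentEncoding string
	compression     CompressionType
}

func (m *memoryStorage) Upload(reader io.Reader, key string, contentType, contentEncoding string, compression CompressionType) error {
	data, err := io.ReadAll(reader)
	if err != nil {
		return err
	}
	m.objects[key] = storedObject{data, contentType, contentEncoding, compression}
	return nil
}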


@@ -67,19 +67,22 @@ func NewS3(cfg *objConfig.ObjectsConfig) (objectstorage.ObjectStorage, error) {
 	}, nil
 }

-func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, compression objectstorage.CompressionType) error {
+func (s *storageImpl) Upload(reader io.Reader, key string, contentType, contentEncoding string, compression objectstorage.CompressionType) error {
 	cacheControl := "max-age=2628000, immutable, private"
-	var contentEncoding *string
+	var encoding *string
 	switch compression {
 	case objectstorage.Gzip:
 		encodeStr := "gzip"
-		contentEncoding = &encodeStr
+		encoding = &encodeStr
 	case objectstorage.Brotli:
 		encodeStr := "br"
-		contentEncoding = &encodeStr
+		encoding = &encodeStr
 	case objectstorage.Zstd:
 		// Have to ignore contentEncoding for Zstd (otherwise will be an error in browser)
 	}
+	if contentEncoding != "" {
+		encoding = &contentEncoding
+	}

 	_, err := s.uploader.Upload(&s3manager.UploadInput{
 		Body: reader,
@@ -87,7 +90,7 @@ func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, c
 		Key:             &key,
 		ContentType:     &contentType,
 		CacheControl:    &cacheControl,
-		ContentEncoding: contentEncoding,
+		ContentEncoding: encoding,
 		Tagging:         s.fileTag,
 	})
 	return err
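The resulting precedence in the S3 backend is: an explicit contentEncoding from the caller wins, otherwise the header is derived from the CompressionType, and Zstd still sends no header. A hypothetical refactor of that selection into a helper, shown only to make the rule explicit; resolveEncoding does not exist in the repository and the import path is assumed.

package sketch

import "openreplay/backend/pkg/objectstorage" // import path assumed

// resolveEncoding mirrors the switch inside the S3 Upload above: it returns
// the Content-Encoding to attach to the object, or nil to omit the header.
func resolveEncoding(contentEncoding string, compression objectstorage.CompressionType) *string {
	// An encoding reported for the original file always wins.
	if contentEncoding != "" {
		return &contentEncoding
	}
	switch compression {
	case objectstorage.Gzip:
		enc := "gzip"
		return &enc
	case objectstorage.Brotli:
		enc := "br"
		return &enc
	default:
		// Zstd (and NoCompression) set no Content-Encoding; per the comment in
		// the diff, sending one for Zstd leads to an error in the browser.
		return nil
	}
}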


@@ -50,16 +50,19 @@ func NewStorage(cfg *config.ObjectsConfig) (objectstorage.ObjectStorage, error)
 	}, nil
 }

-func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, compression objectstorage.CompressionType) error {
+func (s *storageImpl) Upload(reader io.Reader, key string, contentType, contentEncoding string, compression objectstorage.CompressionType) error {
 	cacheControl := "max-age=2628000, immutable, private"
-	var contentEncoding *string
+	var encoding *string
 	switch compression {
 	case objectstorage.Gzip:
 		gzipStr := "gzip"
-		contentEncoding = &gzipStr
+		encoding = &gzipStr
 	case objectstorage.Brotli:
 		gzipStr := "br"
-		contentEncoding = &gzipStr
+		encoding = &gzipStr
+	}
+	if contentEncoding != "" {
+		encoding = &contentEncoding
 	}
 	// Remove leading slash to avoid empty folder creation
 	if strings.HasPrefix(key, "/") {
@@ -68,7 +71,7 @@ func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, c
 	_, err := s.client.UploadStream(context.Background(), s.container, key, reader, &azblob.UploadStreamOptions{
 		HTTPHeaders: &blob.HTTPHeaders{
 			BlobCacheControl:    &cacheControl,
-			BlobContentEncoding: contentEncoding,
+			BlobContentEncoding: encoding,
 			BlobContentType:     &contentType,
 		},
 		Tags: s.tags,
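The Azure Blob implementation applies the same fallback rule before filling BlobContentEncoding. A hypothetical table-driven test that pins that rule down; the pick closure mirrors the logic inside both Upload methods, it is not a real function in the repository, and the objectstorage import path is assumed.

package sketch

import (
	"testing"

	"openreplay/backend/pkg/objectstorage" // import path assumed
)

// TestEncodingFallback checks the shared rule: an explicit contentEncoding
// wins, otherwise the header is derived from the compression type.
func TestEncodingFallback(t *testing.T) {
	pick := func(contentEncoding string, compression objectstorage.CompressionType) string {
		encoding := ""
		switch compression {
		case objectstorage.Gzip:
			encoding = "gzip"
		case objectstorage.Brotli:
			encoding = "br"
		}
		if contentEncoding != "" {
			encoding = contentEncoding
		}
		return encoding
	}
	cases := []struct {
		contentEncoding string
		compression     objectstorage.CompressionType
		want            string // "" means the header is omitted
	}{
		{"br", objectstorage.Gzip, "br"},      // original encoding wins
		{"", objectstorage.Gzip, "gzip"},      // fall back to compression
		{"", objectstorage.Brotli, "br"},      // fall back to compression
		{"", objectstorage.NoCompression, ""}, // no header at all
	}
	for _, c := range cases {
		if got := pick(c.contentEncoding, c.compression); got != c.want {
			t.Errorf("pick(%q, %v) = %q, want %q", c.contentEncoding, c.compression, got, c.want)
		}
	}
}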