Azure support (#1270)

* feat(backend): implemented azure blob storage support

* feat(backend): added azure implementation to backend services

* feat(backend): added azure blob storage support to chalice service

* fix(backend): removed prev version of s3

* feat(backend): moved azure support to ee subfolder

* feat(backend): prepared ee code for new utils.objects package

* feat(backend): added missed modules to go.mod

* feat(backend): added missed modules to go.sum

* feat(backend): go mod tidy

* feat(backend): temporarily made s3 vars not required

* feat(backend): added azure lib to ee chalice

* feat(api): changed azure env var name

* feat(api): added new object store and extra methods to chalice ee

* fix(api): added azure blob lib to alerts

* fix(api): fixed incorrect call in sessions_devtool

* fix(crons): added azure blob storage library to requirements list

* chore(build): Error message for not providing flag.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(backend): removed ios headers and object store for ios messages

* feat(backend): object config refactoring

* chore(helm): Update BUCKET_NAME

* fix(backend): removed object storage usage in http

* feat(backend): added debug logs to azure upload method

* fix(backend): fixed empty folder issue

* fix(backend): removed extra debug log

* chore(helm): Adding global variables for crons

* chore(helm): Remove clickhouse resource limit

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(backend): removed assets debug log

* feat(api): use ABC class instead of empty interface

* feat(api): renamed helpers to generators

* feat(api): changed prep/clean dev scripts

* feat(api): changed name obj_store -> StorageClient

* feat(api): some changes after code review

* fix(api): removed unnecessary packages in oss api

* feat(backend): moved azure implementation to ee folder

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
This commit is contained in:
Alexander 2023-06-08 13:31:54 +02:00 committed by GitHub
parent ec11f617c2
commit 111d6a1b54
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
57 changed files with 833 additions and 554 deletions

View file

@ -45,5 +45,8 @@ function build_crons(){
check_prereq
[[ $1 == "ee" ]] && {
build_crons $1
} || {
echo -e "Crons is only for ee. Rerun the script using \n bash $0 ee"
exit 100
}

View file

@ -6,8 +6,9 @@ from fastapi import HTTPException, status
import schemas
from chalicelib.core import sessions, funnels, errors, issues, metrics, click_maps, sessions_mobs, product_analytics
from chalicelib.utils import helper, pg_client, s3
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.storage import StorageClient
PIE_CHART_GROUP = 5
@ -601,7 +602,7 @@ def make_chart_from_card(project_id, user_id, metric_id, data: schemas.CardChart
__get_mob_keys(project_id=project_id, session_id=raw_metric["data"]["sessionId"])
mob_exists = False
for k in keys:
if s3.exists(bucket=config("sessions_bucket"), key=k):
if StorageClient.exists(bucket=config("sessions_bucket"), key=k):
mob_exists = True
break
if mob_exists:

View file

@ -1,5 +1,5 @@
from chalicelib.core import projects
from chalicelib.utils import s3
from chalicelib.utils.storage import StorageClient
from decouple import config
@ -7,7 +7,7 @@ def sign_keys(project_id, session_id, keys):
result = []
project_key = projects.get_project_key(project_id)
for k in keys:
result.append(s3.get_presigned_url_for_sharing(bucket=config("iosBucket"),
key=f"{project_key}/{session_id}/{k}",
expires_in=60 * 60))
result.append(StorageClient.get_presigned_url_for_sharing(bucket=config("iosBucket"),
key=f"{project_key}/{session_id}/{k}",
expires_in=60 * 60))
return result

View file

@ -1,6 +1,6 @@
from decouple import config
from chalicelib.utils import s3
from chalicelib.utils.storage import StorageClient
def __get_devtools_keys(project_id, session_id):
@ -16,12 +16,12 @@ def __get_devtools_keys(project_id, session_id):
def get_urls(session_id, project_id, check_existence: bool = True):
results = []
for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
if check_existence and not s3.exists(bucket=config("sessions_bucket"), key=k):
if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
continue
results.append(s3.client.generate_presigned_url(
'get_object',
Params={'Bucket': config("sessions_bucket"), 'Key': k},
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
results.append(StorageClient.get_presigned_url_for_sharing(
bucket=config("sessions_bucket"),
expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
key=k
))
return results
@ -29,4 +29,4 @@ def get_urls(session_id, project_id, check_existence: bool = True):
def delete_mobs(project_id, session_ids):
for session_id in session_ids:
for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
s3.tag_for_deletion(bucket=config("sessions_bucket"), key=k)
StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)

View file

@ -1,6 +1,6 @@
from decouple import config
from chalicelib.utils import s3
from chalicelib.utils.storage import StorageClient
def __get_mob_keys(project_id, session_id):
@ -21,12 +21,12 @@ def __get_mob_keys_deprecated(session_id):
def get_urls(project_id, session_id, check_existence: bool = True):
results = []
for k in __get_mob_keys(project_id=project_id, session_id=session_id):
if check_existence and not s3.exists(bucket=config("sessions_bucket"), key=k):
if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
continue
results.append(s3.client.generate_presigned_url(
'get_object',
Params={'Bucket': config("sessions_bucket"), 'Key': k},
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
results.append(StorageClient.get_presigned_url_for_sharing(
bucket=config("sessions_bucket"),
expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
key=k
))
return results
@ -34,24 +34,21 @@ def get_urls(project_id, session_id, check_existence: bool = True):
def get_urls_depercated(session_id, check_existence: bool = True):
results = []
for k in __get_mob_keys_deprecated(session_id=session_id):
if check_existence and not s3.exists(bucket=config("sessions_bucket"), key=k):
if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
continue
results.append(s3.client.generate_presigned_url(
'get_object',
Params={'Bucket': config("sessions_bucket"), 'Key': k},
ExpiresIn=100000
results.append(StorageClient.get_presigned_url_for_sharing(
bucket=config("sessions_bucket"),
expires_in=100000,
key=k
))
return results
def get_ios(session_id):
return s3.client.generate_presigned_url(
'get_object',
Params={
'Bucket': config("ios_bucket"),
'Key': str(session_id)
},
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
return StorageClient.get_presigned_url_for_sharing(
bucket=config("ios_bucket"),
expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
key=str(session_id)
)
@ -59,4 +56,4 @@ def delete_mobs(project_id, session_ids):
for session_id in session_ids:
for k in __get_mob_keys(project_id=project_id, session_id=session_id) \
+ __get_mob_keys_deprecated(session_id=session_id):
s3.tag_for_deletion(bucket=config("sessions_bucket"), key=k)
StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)

View file

@ -4,24 +4,24 @@ import requests
from decouple import config
from chalicelib.core import sourcemaps_parser
from chalicelib.utils import s3
from chalicelib.utils.storage import StorageClient, generators
def presign_share_urls(project_id, urls):
results = []
for u in urls:
results.append(s3.get_presigned_url_for_sharing(bucket=config('sourcemaps_bucket'), expires_in=120,
key=s3.generate_file_key_from_url(project_id, u),
check_exists=True))
results.append(StorageClient.get_presigned_url_for_sharing(bucket=config('sourcemaps_bucket'), expires_in=120,
key=generators.generate_file_key_from_url(project_id, u),
check_exists=True))
return results
def presign_upload_urls(project_id, urls):
results = []
for u in urls:
results.append(s3.get_presigned_url_for_upload(bucket=config('sourcemaps_bucket'),
expires_in=1800,
key=s3.generate_file_key_from_url(project_id, u)))
results.append(StorageClient.get_presigned_url_for_upload(bucket=config('sourcemaps_bucket'),
expires_in=1800,
key=generators.generate_file_key_from_url(project_id, u)))
return results
@ -87,7 +87,7 @@ def get_traces_group(project_id, payload):
file_exists_in_bucket = False
file_exists_in_server = False
file_url = u["absPath"]
key = s3.generate_file_key_from_url(project_id, file_url) # use filename instead?
key = generators.generate_file_key_from_url(project_id, file_url) # use filename instead?
params_idx = file_url.find("?")
if file_url and len(file_url) > 0 \
and not (file_url[:params_idx] if params_idx > -1 else file_url).endswith(".js"):
@ -95,7 +95,7 @@ def get_traces_group(project_id, payload):
payloads[key] = None
if key not in payloads:
file_exists_in_bucket = len(file_url) > 0 and s3.exists(config('sourcemaps_bucket'), key)
file_exists_in_bucket = len(file_url) > 0 and StorageClient.exists(config('sourcemaps_bucket'), key)
if len(file_url) > 0 and not file_exists_in_bucket:
print(f"{u['absPath']} sourcemap (key '{key}') doesn't exist in S3 looking in server")
if not file_url.endswith(".map"):
@ -153,7 +153,7 @@ def fetch_missed_contexts(frames):
file = source_cache[file_abs_path]
else:
file_path = get_js_cache_path(file_abs_path)
file = s3.get_file(config('js_cache_bucket'), file_path)
file = StorageClient.get_file(config('js_cache_bucket'), file_path)
if file is None:
print(f"Missing abs_path: {file_abs_path}, file {file_path} not found in {config('js_cache_bucket')}")
source_cache[file_abs_path] = file

View file

@ -1,148 +0,0 @@
import hashlib
from urllib.parse import urlparse
import boto3
import botocore
from botocore.client import Config
from botocore.exceptions import ClientError
from decouple import config
from requests.models import PreparedRequest
# Module-level S3 client, built once at import time.
# Without S3_HOST, rely on boto3's default AWS credential chain; otherwise
# target an S3-compatible endpoint (e.g. MinIO) with explicit credentials.
if not config("S3_HOST", default=False):
    client = boto3.client('s3')
else:
    client = boto3.client('s3', endpoint_url=config("S3_HOST"),
                          aws_access_key_id=config("S3_KEY"),
                          aws_secret_access_key=config("S3_SECRET"),
                          config=Config(signature_version='s3v4'),
                          region_name=config("sessions_region"),
                          verify=not config("S3_DISABLE_SSL_VERIFY", default=False, cast=bool))


def __get_s3_resource():
    """Return a boto3 S3 resource configured like the module-level client."""
    if not config("S3_HOST", default=False):
        return boto3.resource('s3')
    return boto3.resource('s3', endpoint_url=config("S3_HOST"),
                          aws_access_key_id=config("S3_KEY"),
                          aws_secret_access_key=config("S3_SECRET"),
                          config=Config(signature_version='s3v4'),
                          region_name=config("sessions_region"),
                          verify=not config("S3_DISABLE_SSL_VERIFY", default=False, cast=bool))


def exists(bucket, key):
    """Return True if `key` exists in `bucket`; False on a 404 HEAD response."""
    try:
        __get_s3_resource().Object(bucket, key).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            return False
        else:
            # Something else has gone wrong.
            raise
    return True


def get_presigned_url_for_sharing(bucket, expires_in, key, check_exists=False):
    """Return a GET pre-signed URL for `key`; None if check_exists and the key is missing."""
    if check_exists and not exists(bucket, key):
        return None
    return client.generate_presigned_url(
        'get_object',
        Params={
            'Bucket': bucket,
            'Key': key
        },
        ExpiresIn=expires_in
    )


def get_presigned_url_for_upload(bucket, expires_in, key, **args):
    """Return a PUT pre-signed URL for uploading `key` (extra kwargs are ignored)."""
    return client.generate_presigned_url(
        'put_object',
        Params={
            'Bucket': bucket,
            'Key': key
        },
        ExpiresIn=expires_in
    )


def get_presigned_url_for_upload_secure(bucket, expires_in, key, conditions=None, public=False, content_type=None):
    """Return a POST-policy upload URL with ACL/content-type fields folded into the query string."""
    acl = 'private'
    if public:
        acl = 'public-read'
    fields = {"acl": acl}
    if content_type:
        fields["Content-Type"] = content_type
    url_parts = client.generate_presigned_post(
        Bucket=bucket,
        Key=key,
        ExpiresIn=expires_in,
        Fields=fields,
        Conditions=conditions,
    )
    # Merge the signed policy fields into the URL so callers get one opaque URL.
    req = PreparedRequest()
    req.prepare_url(
        f"{url_parts['url']}/{url_parts['fields']['key']}", url_parts['fields'])
    return req.url


def get_file(source_bucket, source_key):
    """Return the object's body decoded as text, or None when the key is absent."""
    try:
        result = client.get_object(
            Bucket=source_bucket,
            Key=source_key
        )
    except ClientError as ex:
        if ex.response['Error']['Code'] == 'NoSuchKey':
            return None
        else:
            raise ex
    return result["Body"].read().decode()


def rename(source_bucket, source_key, target_bucket, target_key):
    """Move an object by copy-then-delete (S3 has no native rename)."""
    s3 = __get_s3_resource()
    s3.Object(target_bucket, target_key).copy_from(
        CopySource=f'{source_bucket}/{source_key}')
    s3.Object(source_bucket, source_key).delete()


def tag_for_deletion(bucket, key):
    """Mark `key` for delayed deletion; returns False when the object does not exist."""
    if not exists(bucket, key):
        return False
    # Copy the file to change the creation date, so it can be deleted X days after the tag's creation
    s3 = __get_s3_resource()
    s3_target = s3.Object(bucket, key)
    s3_target.copy_from(
        CopySource={'Bucket': bucket, 'Key': key},
        MetadataDirective='COPY',
        TaggingDirective='COPY'
    )
    tag_file(bucket=bucket, file_key=key, tag_key='to_delete_in_days', tag_value=config("SCH_DELETE_DAYS", default='7'))


def generate_file_key(project_id, key):
    """Return the storage key '<project_id>/<md5(key)>'."""
    return f"{project_id}/{hashlib.md5(key.encode()).hexdigest()}"


def generate_file_key_from_url(project_id, url):
    """Derive a storage key from `url`, dropping its query string and fragment."""
    u = urlparse(url)
    new_url = u.scheme + "://" + u.netloc + u.path
    return generate_file_key(project_id=project_id, key=new_url)


def tag_file(file_key, bucket, tag_key, tag_value):
    """Replace the object's tag set with the single tag {tag_key: tag_value}."""
    return client.put_object_tagging(
        Bucket=bucket,
        Key=file_key,
        Tagging={
            'TagSet': [
                {
                    'Key': tag_key,
                    'Value': tag_value
                },
            ]
        }
    )

View file

@ -0,0 +1,4 @@
from .s3 import AmazonS3Storage

# Init global object storage client.
# Single shared instance used across the API layer; callers import
# `StorageClient` rather than constructing a backend, so the concrete
# implementation can be swapped here (e.g. Azure in the EE build).
StorageClient = AmazonS3Storage()

View file

@ -0,0 +1,12 @@
import hashlib
from urllib.parse import urlparse
def generate_file_key(project_id, key):
    """Return the storage key '<project_id>/<md5-hex-digest of key>'."""
    digest = hashlib.md5(key.encode()).hexdigest()
    return "/".join((str(project_id), digest))


def generate_file_key_from_url(project_id, url):
    """Derive a storage key from *url*, ignoring its query string and fragment."""
    parts = urlparse(url)
    normalized = "%s://%s%s" % (parts.scheme, parts.netloc, parts.path)
    return generate_file_key(project_id=project_id, key=normalized)

View file

@ -0,0 +1,28 @@
from abc import ABC, abstractmethod
class ObjectStorage(ABC):
    """Interface every object-storage backend (S3, Azure Blob, ...) must implement."""

    @abstractmethod
    def exists(self, bucket, key):
        """Return True when the object exists in the bucket, False otherwise."""

    @abstractmethod
    def get_file(self, source_bucket, source_key):
        """Download the object and return its contents."""

    @abstractmethod
    def get_presigned_url_for_sharing(self, bucket, expires_in, key, check_exists=False):
        """Return a pre-signed URL for downloading the object from storage."""

    @abstractmethod
    def get_presigned_url_for_upload(self, bucket, expires_in, key, **args):
        """Return a pre-signed URL for uploading the object to storage."""

    @abstractmethod
    def tag_for_deletion(self, bucket, key):
        """Tag the object with 'to_delete_in_days' to mark it for deletion."""

View file

@ -0,0 +1,121 @@
import boto3
import botocore
from botocore.client import Config
from botocore.exceptions import ClientError
from decouple import config
from requests.models import PreparedRequest
from chalicelib.utils.storage.interface import ObjectStorage
class AmazonS3Storage(ObjectStorage):
    """ObjectStorage backend for AWS S3 or any S3-compatible endpoint.

    The boto3 client/resource pair is created once at class-definition time
    and shared by all instances.
    """

    # Without S3_HOST, rely on boto3's default AWS credential chain; otherwise
    # target a custom endpoint (e.g. MinIO) with explicit keys, v4 signatures,
    # and an optional SSL-verification opt-out.
    if not config("S3_HOST", default=False):
        client = boto3.client('s3')
        resource = boto3.resource('s3')
    else:
        client = boto3.client('s3', endpoint_url=config("S3_HOST"),
                              aws_access_key_id=config("S3_KEY"),
                              aws_secret_access_key=config("S3_SECRET"),
                              config=Config(signature_version='s3v4'),
                              region_name=config("sessions_region"),
                              verify=not config("S3_DISABLE_SSL_VERIFY", default=False, cast=bool))
        resource = boto3.resource('s3', endpoint_url=config("S3_HOST"),
                                 aws_access_key_id=config("S3_KEY"),
                                 aws_secret_access_key=config("S3_SECRET"),
                                 config=Config(signature_version='s3v4'),
                                 region_name=config("sessions_region"),
                                 verify=not config("S3_DISABLE_SSL_VERIFY", default=False, cast=bool))

    def exists(self, bucket, key):
        """Return True if `key` exists in `bucket`; False on a 404 HEAD response."""
        try:
            self.resource.Object(bucket, key).load()
        except botocore.exceptions.ClientError as e:
            if e.response['Error']['Code'] == "404":
                return False
            else:
                # Something else has gone wrong.
                raise
        return True

    def get_presigned_url_for_sharing(self, bucket, expires_in, key, check_exists=False):
        """Return a GET pre-signed URL for `key`; None if check_exists and the key is missing."""
        if check_exists and not self.exists(bucket, key):
            return None
        return self.client.generate_presigned_url(
            'get_object',
            Params={
                'Bucket': bucket,
                'Key': key
            },
            ExpiresIn=expires_in
        )

    def get_presigned_url_for_upload(self, bucket, expires_in, key, **args):
        """Return a PUT pre-signed URL for uploading `key` (extra kwargs are ignored)."""
        return self.client.generate_presigned_url(
            'put_object',
            Params={
                'Bucket': bucket,
                'Key': key
            },
            ExpiresIn=expires_in
        )

    def get_presigned_url_for_upload_secure(self, bucket, expires_in, key, conditions=None, public=False,
                                            content_type=None):
        """Return a POST-policy upload URL with ACL/content-type fields folded into the query string."""
        acl = 'private'
        if public:
            acl = 'public-read'
        fields = {"acl": acl}
        if content_type:
            fields["Content-Type"] = content_type
        url_parts = self.client.generate_presigned_post(
            Bucket=bucket,
            Key=key,
            ExpiresIn=expires_in,
            Fields=fields,
            Conditions=conditions,
        )
        # Merge the signed policy fields into the URL so callers get one opaque URL.
        req = PreparedRequest()
        req.prepare_url(
            f"{url_parts['url']}/{url_parts['fields']['key']}", url_parts['fields'])
        return req.url

    def get_file(self, source_bucket, source_key):
        """Return the object's body decoded as text, or None when the key is absent."""
        try:
            result = self.client.get_object(
                Bucket=source_bucket,
                Key=source_key
            )
        except ClientError as ex:
            if ex.response['Error']['Code'] == 'NoSuchKey':
                return None
            else:
                raise ex
        return result["Body"].read().decode()

    def tag_for_deletion(self, bucket, key):
        """Tag `key` for delayed deletion; returns False when the object does not exist."""
        if not self.exists(bucket, key):
            return False
        # Copy the file to change the creation date, so it can be deleted X days after the tag's creation
        s3_target = self.resource.Object(bucket, key)
        s3_target.copy_from(
            CopySource={'Bucket': bucket, 'Key': key},
            MetadataDirective='COPY',
            TaggingDirective='COPY'
        )
        self.tag_file(bucket=bucket, file_key=key, tag_key='to_delete_in_days',
                      tag_value=config("SCH_DELETE_DAYS", default='7'))

    def tag_file(self, file_key, bucket, tag_key, tag_value):
        """Replace the object's tag set with the single tag {tag_key: tag_value}."""
        return self.client.put_object_tagging(
            Bucket=bucket,
            Key=file_key,
            Tagging={
                'TagSet': [
                    {
                        'Key': tag_key,
                        'Value': tag_value
                    },
                ]
            }
        )

View file

@ -12,4 +12,4 @@ fastapi==0.96.0
uvicorn[standard]==0.22.0
python-decouple==3.8
pydantic[email]==1.10.8
apscheduler==3.10.1
apscheduler==3.10.1

View file

@ -14,4 +14,4 @@ python-decouple==3.8
pydantic[email]==1.10.8
apscheduler==3.10.1
redis==4.5.5
redis==4.5.5

View file

@ -13,6 +13,7 @@ import (
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics"
assetsMetrics "openreplay/backend/pkg/metrics/assets"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/queue"
)
@ -24,7 +25,14 @@ func main() {
cfg := config.New()
cacher := cacher.NewCacher(cfg)
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
log.Fatalf("Error on object storage creation: %v", err)
}
cacher, err := cacher.NewCacher(cfg, objStore)
if err != nil {
log.Fatalf("Error on cacher creation: %v", err)
}
msgHandler := func(msg messages.Message) {
switch m := msg.(type) {

View file

@ -36,7 +36,10 @@ func main() {
defer dbConn.Close()
// Build all services
services := services.New(cfg, producer, dbConn)
services, err := services.New(cfg, producer, dbConn)
if err != nil {
log.Fatalf("failed while creating services: %s", err)
}
// Init server's routes
router, err := router.NewRouter(cfg, services)

View file

@ -13,8 +13,8 @@ import (
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics"
storageMetrics "openreplay/backend/pkg/metrics/storage"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/queue"
cloud "openreplay/backend/pkg/storage"
)
func main() {
@ -25,8 +25,11 @@ func main() {
cfg := config.New()
s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket, cfg.UseFileTags())
srv, err := storage.New(cfg, s3)
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
log.Fatalf("can't init object storage: %s", err)
}
srv, err := storage.New(cfg, objStore)
if err != nil {
log.Printf("can't init storage service: %s", err)
return

View file

@ -1,10 +1,11 @@
module openreplay/backend
go 1.19
go 1.18
require (
cloud.google.com/go/logging v1.4.2
github.com/ClickHouse/clickhouse-go/v2 v2.9.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0
github.com/ClickHouse/clickhouse-go/v2 v2.2.0
github.com/Masterminds/semver v1.5.0
github.com/andybalholm/brotli v1.0.5
github.com/aws/aws-sdk-go v1.44.98
@ -35,11 +36,11 @@ require (
cloud.google.com/go/compute v1.6.1 // indirect
cloud.google.com/go/iam v0.3.0 // indirect
cloud.google.com/go/storage v1.14.0 // indirect
github.com/ClickHouse/ch-go v0.52.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/go-faster/city v1.0.1 // indirect
github.com/go-faster/errors v0.6.1 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.9 // indirect
@ -52,19 +53,19 @@ require (
github.com/jackc/puddle v1.2.2-0.20220404125616-4e959849469a // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/paulmach/orb v0.9.0 // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/paulmach/orb v0.7.1 // indirect
github.com/pierrec/lz4/v4 v4.1.15 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/segmentio/asm v1.2.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/stretchr/testify v1.8.2 // indirect
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/otel v1.13.0 // indirect
go.opentelemetry.io/otel/trace v1.13.0 // indirect
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect
go.opentelemetry.io/otel v1.7.0 // indirect
go.opentelemetry.io/otel/trace v1.7.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 // indirect
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/text v0.8.0 // indirect
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
@ -73,5 +74,4 @@ require (
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View file

@ -59,15 +59,24 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0 h1:rTnT/Jrcm+figWlYz4Ixzt0SJVR2cMC8lvZcimipiEY=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.4.0/go.mod h1:ON4tFdPTwRcgWEaVDrN3584Ef+b7GgSJaXxe5fW9t4M=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2 h1:uqM+VoHjVH6zdlkLF2b6O0ZANcHoj3rO0PoQ3jglUJA=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.2.2/go.mod h1:twTKAa1E6hLmSDjLhaCkbTMQKc7p/rNLU40rLxGEOCI=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0 h1:leh5DwKv6Ihwi+h60uHtn6UWAxBbZ0q8DwQVMzf61zw=
github.com/Azure/azure-sdk-for-go/sdk/internal v1.2.0/go.mod h1:eWRD7oawr1Mu1sLCawqVc0CUiF43ia3qQMxLscsKQ9w=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0 h1:u/LLAOFgsMv7HmNL4Qufg58y+qElGOt5qv0z1mURkRY=
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.0.0/go.mod h1:2e8rMJtl2+2j+HXbTBwnyGpm5Nou7KhvSfxOq8JpTag=
github.com/AzureAD/microsoft-authentication-library-for-go v0.9.0 h1:UE9n9rkJF62ArLb1F3DEjRt8O3jLwMWdSoypKV4f3MU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/ch-go v0.52.1 h1:nucdgfD1BDSHjbNaG3VNebonxJzD8fX8jbuBpfo5VY0=
github.com/ClickHouse/ch-go v0.52.1/go.mod h1:B9htMJ0hii/zrC2hljUKdnagRBuLqtRG/GrU3jqCwRk=
github.com/ClickHouse/clickhouse-go/v2 v2.9.1 h1:IeE2bwVvAba7Yw5ZKu98bKI4NpDmykEy6jUaQdJJCk8=
github.com/ClickHouse/clickhouse-go/v2 v2.9.1/go.mod h1:teXfZNM90iQ99Jnuht+dxQXCuhDZ8nvvMoTJOFrcmcg=
github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/actgardner/gogen-avro/v10 v10.1.0/go.mod h1:o+ybmVjEa27AAr35FRqU98DJu1fXES56uXniYFv4yDA=
github.com/actgardner/gogen-avro/v10 v10.2.1/go.mod h1:QUhjeHPchheYmMDni/Nx7VB0RsT/ee8YIgGY/xpEQgQ=
github.com/actgardner/gogen-avro/v9 v9.1.0/go.mod h1:nyTj6wPqDJoxM3qdnjcLv+EnMDSDFqE0qDpva2QRmKc=
@ -86,6 +95,7 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@ -105,6 +115,7 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -126,6 +137,7 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dnaeon/go-vcr v1.1.0 h1:ReYa/UBrRyQdant9B4fNHGoCNKw6qh6P0fsdGmZpR7c=
github.com/elastic/go-elasticsearch/v7 v7.13.1 h1:PaM3V69wPlnwR+ne50rSKKn0RNDYnnOFQcuGEI0ce80=
github.com/elastic/go-elasticsearch/v7 v7.13.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@ -144,10 +156,6 @@ github.com/frankban/quicktest v1.10.0/go.mod h1:ui7WezCLWMWxVWr1GETZY3smRy0G4KWq
github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09mUdL/ijj8og=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-faster/city v1.0.1 h1:4WAxSZ3V2Ws4QRDrscLEDcibJY8uf41H6AhXDrNDcGw=
github.com/go-faster/city v1.0.1/go.mod h1:jKcUJId49qdW3L1qKHH/3wPeUstCVpVSXTM6vO3VcTw=
github.com/go-faster/errors v0.6.1 h1:nNIPOBkprlKzkThvS/0YaX8Zs9KewLCOSFQS5BU06FI=
github.com/go-faster/errors v0.6.1/go.mod h1:5MGV2/2T9yvlrbhe9pD9LO5Z/2zCSq2T8j+Jpi2LAyY=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@ -157,13 +165,20 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -252,8 +267,10 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hamba/avro v1.5.6/go.mod h1:3vNT0RLXXpFm2Tb/5KC71ZRJlOroggq1Rcitb6k4Fr8=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -320,6 +337,7 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
@ -334,7 +352,6 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
@ -352,6 +369,7 @@ github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
@ -366,14 +384,15 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nrwiersma/avro-benchmarks v0.0.0-20210913175520-21aec48c8f76/go.mod h1:iKyFMidsk/sVYONJRE372sJuX/QTRPacU7imPqqsu7g=
@ -384,11 +403,13 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/oschwald/maxminddb-golang v1.7.0 h1:JmU4Q1WBv5Q+2KZy5xJI+98aUwTIrPPxZUkd5Cwr8Zc=
github.com/oschwald/maxminddb-golang v1.7.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
github.com/paulmach/orb v0.9.0 h1:MwA1DqOKtvCgm7u9RZ/pnYejTeDJPnr0+0oFajBbJqk=
github.com/paulmach/orb v0.9.0/go.mod h1:SudmOk85SXtmXAB3sLGyJ6tZy/8pdfrV0o6ef98Xc30=
github.com/paulmach/orb v0.7.1 h1:Zha++Z5OX/l168sqHK3k4z18LDvr+YAO/VjK0ReQ9rU=
github.com/paulmach/orb v0.7.1/go.mod h1:FWRlTgl88VI1RBx/MkrwWDRhQ96ctqMCh8boXhmqB/A=
github.com/paulmach/protoscan v0.2.1/go.mod h1:SpcSwydNLrxUGSDvXvO0P7g7AuhJ7lcKfDlhJCDw2gY=
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.15 h1:MO0/ucJhngq7299dKLwIMtgTfbkoSPF6AoMYDd8Q4q0=
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -429,10 +450,11 @@ github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OK
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/santhosh-tekuri/jsonschema/v5 v5.0.0/go.mod h1:FKdcjfQW6rpZSnxxUvEA5H/cDPdvJ/SZJQLWWXWGrZ0=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys=
github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs=
github.com/sethvargo/go-envconfig v0.7.0 h1:P/ljQXSRjgAgsnIripHs53Jg/uNVXu2FYQ9yLSDappA=
github.com/sethvargo/go-envconfig v0.7.0/go.mod h1:00S1FAhRUuTNJazWBWcJGvEHOM+NO6DhoRMAOX7FY5o=
github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
@ -444,6 +466,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@ -452,23 +476,23 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe h1:aj/vX5epIlQQBEocKoM9nSAiNpakdQzElc8SaRFPu+I=
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe/go.mod h1:OBcG9bn7sHtXgarhUEb3OfCnNsgtGnkVf41ilSZ3K3E=
github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI=
github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g=
github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.mongodb.org/mongo-driver v1.11.1/go.mod h1:s7p5vEtfbeR1gYi6pnj3c3/urpbLv2T5Sfd6Rp2HBB8=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@ -477,10 +501,10 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/otel v1.13.0 h1:1ZAKnNQKwBBxFtww/GwxNUyTf0AxkZzrukO8MeXqe4Y=
go.opentelemetry.io/otel v1.13.0/go.mod h1:FH3RtdZCzRkJYFTCsAKDy9l/XYjMdNv6QrkFFB8DvVg=
go.opentelemetry.io/otel/trace v1.13.0 h1:CBgRZ6ntv+Amuj1jDsMhZtlAPT6gbyIRdaIzFhfBSdY=
go.opentelemetry.io/otel/trace v1.13.0/go.mod h1:muCvmmO9KKpvuXSf3KKAXXB2ygNYHQ+ZfI5X08d3tds=
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
@ -498,8 +522,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -577,7 +601,6 @@ golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLd
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
@ -618,9 +641,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4=
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7 h1:ZrnxWX62AgTKOSagEqxvb3ffipvEDX2pl7E1TdqLqIc=
golang.org/x/sync v0.0.0-20220923202941-7f9b1623fab7/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -638,8 +660,10 @@ golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -693,6 +717,7 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=

View file

@ -9,25 +9,25 @@ import (
"log"
"mime"
"net/http"
metrics "openreplay/backend/pkg/metrics/assets"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
config "openreplay/backend/internal/config/assets"
"openreplay/backend/pkg/storage"
metrics "openreplay/backend/pkg/metrics/assets"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/url/assets"
"github.com/pkg/errors"
)
const MAX_CACHE_DEPTH = 5
type cacher struct {
timeoutMap *timeoutMap // Concurrency implemented
s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently."
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
rewriter *assets.Rewriter // Read only
timeoutMap *timeoutMap // Concurrency implemented
objStorage objectstorage.ObjectStorage // AWS Docs: "These clients are safe to use concurrently."
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
rewriter *assets.Rewriter // Read only
Errors chan error
sizeLimit int
requestHeaders map[string]string
@ -38,7 +38,14 @@ func (c *cacher) CanCache() bool {
return c.workers.CanAddTask()
}
func NewCacher(cfg *config.Config) *cacher {
func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {
switch {
case cfg == nil:
return nil, errors.New("config is nil")
case store == nil:
return nil, errors.New("object storage is nil")
}
rewriter := assets.NewRewriter(cfg.AssetsOrigin)
tlsConfig := &tls.Config{
@ -71,7 +78,7 @@ func NewCacher(cfg *config.Config) *cacher {
c := &cacher{
timeoutMap: newTimeoutMap(),
s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets, cfg.UseFileTags()),
objStorage: store,
httpClient: &http.Client{
Timeout: time.Duration(6) * time.Second,
Transport: &http.Transport{
@ -85,7 +92,7 @@ func NewCacher(cfg *config.Config) *cacher {
requestHeaders: cfg.AssetsRequestHeaders,
}
c.workers = NewPool(64, c.CacheFile)
return c
return c, nil
}
func (c *cacher) CacheFile(task *Task) {
@ -151,7 +158,7 @@ func (c *cacher) cacheURL(t *Task) {
// TODO: implement in streams
start = time.Now()
err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, storage.NoCompression)
err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, objectstorage.NoCompression)
if err != nil {
metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
c.Errors <- errors.Wrap(err, t.urlContext)
@ -198,7 +205,7 @@ func (c *cacher) checkTask(newTask *Task) {
return
}
c.timeoutMap.add(cachePath)
crTime := c.s3.GetCreationTime(cachePath)
crTime := c.objStorage.GetCreationTime(cachePath)
if crTime != nil && crTime.After(time.Now().Add(-MAX_STORAGE_TIME)) {
return
}

View file

@ -3,14 +3,14 @@ package assets
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
"openreplay/backend/internal/config/objectstorage"
)
type Config struct {
common.Config
objectstorage.ObjectsConfig
GroupCache string `env:"GROUP_CACHE,required"`
TopicCache string `env:"TOPIC_CACHE,required"`
AWSRegion string `env:"AWS_REGION,required"`
S3BucketAssets string `env:"S3_BUCKET_ASSETS,required"`
AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
AssetsSizeLimit int `env:"ASSETS_SIZE_LIMIT,required"`
AssetsRequestHeaders map[string]string `env:"ASSETS_REQUEST_HEADERS"`

View file

@ -9,7 +9,6 @@ type Config struct {
MessageSizeLimit int `env:"QUEUE_MESSAGE_SIZE_LIMIT,default=1048576"`
MaxMemoryUsage uint64 `env:"MAX_MEMORY_USAGE,default=80"`
MemoryLimitMB uint64 `env:"MEMORY_LIMIT_MB,default=0"` // 0 means take limit from OS (cgroup)
CloudName string `env:"CLOUD,default=aws"`
}
type Configer interface {
@ -20,10 +19,6 @@ func (c *Config) GetConfigPath() string {
return c.ConfigFilePath
}
func (c *Config) UseFileTags() bool {
return c.CloudName != "azure"
}
// Postgres config
type Postgres struct {

View file

@ -19,8 +19,6 @@ type Config struct {
CompressionThreshold int64 `env:"COMPRESSION_THRESHOLD,default=20000"`
JsonSizeLimit int64 `env:"JSON_SIZE_LIMIT,default=1000"`
FileSizeLimit int64 `env:"FILE_SIZE_LIMIT,default=10000000"`
AWSRegion string `env:"AWS_REGION,required"`
S3BucketIOSImages string `env:"S3_BUCKET_IOS_IMAGES,required"`
TokenSecret string `env:"TOKEN_SECRET,required"`
UAParserFile string `env:"UAPARSER_FILE,required"`
MaxMinDBFile string `env:"MAXMINDDB_FILE,required"`

View file

@ -0,0 +1,20 @@
package objectstorage
// ObjectsConfig is the object-storage configuration shared by services that
// read or write blobs. CloudName selects the backend ("aws" by default);
// the AWS* fields configure S3-compatible stores, while the Azure* fields
// carry Azure Blob Storage credentials. BucketName names the bucket (or
// container) in either case.
type ObjectsConfig struct {
	ServiceName          string `env:"SERVICE_NAME,required"`
	CloudName            string `env:"CLOUD,default=aws"` // "azure" disables object tagging; see UseFileTags
	BucketName           string `env:"BUCKET_NAME,required"`
	AWSRegion            string `env:"AWS_REGION"`
	AWSAccessKeyID       string `env:"AWS_ACCESS_KEY_ID"`
	AWSSecretAccessKey   string `env:"AWS_SECRET_ACCESS_KEY"`
	AWSEndpoint          string `env:"AWS_ENDPOINT"`            // optional custom S3-compatible endpoint
	AWSSkipSSLValidation bool   `env:"AWS_SKIP_SSL_VALIDATION"` // skip TLS certificate checks for custom endpoints
	AzureAccountName     string `env:"AZURE_ACCOUNT_NAME"`
	AzureAccountKey      string `env:"AZURE_ACCOUNT_KEY"`
}
// UseFileTags reports whether uploads should carry S3-style object tags.
// Tagging is enabled for every cloud except "azure".
func (c *ObjectsConfig) UseFileTags() bool {
	if c.CloudName == "azure" {
		return false
	}
	return true
}

View file

@ -3,13 +3,13 @@ package storage
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
"openreplay/backend/internal/config/objectstorage"
"time"
)
type Config struct {
common.Config
S3Region string `env:"AWS_REGION_WEB,required"`
S3Bucket string `env:"S3_BUCKET_WEB,required"`
objectstorage.ObjectsConfig
FSDir string `env:"FS_DIR,required"`
FileSplitSize int `env:"FILE_SPLIT_SIZE,required"`
RetryTimeout time.Duration `env:"RETRY_TIMEOUT,default=2m"`

View file

@ -9,7 +9,7 @@ import (
"openreplay/backend/internal/http/ios"
"openreplay/backend/internal/http/util"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/storage"
"openreplay/backend/pkg/objectstorage"
"strconv"
"time"
@ -166,7 +166,7 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request)
key := prefix + fileHeader.Filename
log.Printf("Uploading image... %v", util.SafeString(key))
go func() { //TODO: mime type from header
if err := e.services.Storage.Upload(file, key, "image/jpeg", storage.NoCompression); err != nil {
if err := e.services.Storage.Upload(file, key, "image/jpeg", objectstorage.NoCompression); err != nil {
log.Printf("Upload ios screen error. %v", err)
}
}()

View file

@ -104,10 +104,6 @@ func (e *Router) init() {
"/v1/web/not-started": e.notStartedHandlerWeb,
"/v1/web/start": e.startSessionHandlerWeb,
"/v1/web/i": e.pushMessagesHandlerWeb,
"/v1/ios/start": e.startSessionHandlerIOS,
"/v1/ios/i": e.pushMessagesHandlerIOS,
"/v1/ios/late": e.pushLateMessagesHandlerIOS,
"/v1/ios/images": e.imagesUploadHandlerIOS,
}
prefix := "/ingest"

View file

@ -7,7 +7,6 @@ import (
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/storage"
"openreplay/backend/pkg/token"
)
@ -18,17 +17,15 @@ type ServicesBuilder struct {
UaParser *uaparser.UAParser
GeoIP geoip.GeoParser
Tokenizer *token.Tokenizer
Storage *storage.S3
}
func New(cfg *http.Config, producer types.Producer, pgconn *cache.PGCache) *ServicesBuilder {
func New(cfg *http.Config, producer types.Producer, pgconn *cache.PGCache) (*ServicesBuilder, error) {
return &ServicesBuilder{
Database: pgconn,
Producer: producer,
Storage: storage.NewS3(cfg.AWSRegion, cfg.S3BucketIOSImages, cfg.UseFileTags()),
Tokenizer: token.NewTokenizer(cfg.TokenSecret),
UaParser: uaparser.NewUAParser(cfg.UAParserFile),
GeoIP: geoip.New(cfg.MaxMinDBFile),
Flaker: flakeid.NewFlaker(cfg.WorkerID),
}
}, nil
}

View file

@ -6,18 +6,17 @@ import (
"github.com/andybalholm/brotli"
"io"
"log"
"openreplay/backend/pkg/objectstorage"
"os"
"strconv"
"strings"
"sync"
"time"
gzip "github.com/klauspost/pgzip"
config "openreplay/backend/internal/config/storage"
"openreplay/backend/pkg/messages"
metrics "openreplay/backend/pkg/metrics/storage"
"openreplay/backend/pkg/storage"
gzip "github.com/klauspost/pgzip"
)
type FileType string
@ -71,23 +70,23 @@ func NewBreakTask() *Task {
type Storage struct {
cfg *config.Config
s3 *storage.S3
objStorage objectstorage.ObjectStorage
startBytes []byte
compressionTasks chan *Task // brotli compression or gzip compression with encryption
uploadingTasks chan *Task // upload to s3
workersStopped chan struct{}
}
func New(cfg *config.Config, s3 *storage.S3) (*Storage, error) {
func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*Storage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case s3 == nil:
return nil, fmt.Errorf("s3 storage is empty")
case objStorage == nil:
return nil, fmt.Errorf("object storage is empty")
}
newStorage := &Storage{
cfg: cfg,
s3: s3,
objStorage: objStorage,
startBytes: make([]byte, cfg.FileSplitSize),
compressionTasks: make(chan *Task, 1),
uploadingTasks: make(chan *Task, 1),
@ -378,9 +377,9 @@ func (s *Storage) uploadSession(task *Task) {
uploadDome int64 = 0
uploadDev int64 = 0
)
compression := storage.NoCompression
compression := objectstorage.NoCompression
if task.key == "" {
compression = storage.Brotli
compression = objectstorage.Brotli
}
go func() {
if task.doms != nil {
@ -388,7 +387,7 @@ func (s *Storage) uploadSession(task *Task) {
metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
// Upload session to s3
start := time.Now()
if err := s.s3.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", compression); err != nil {
if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDoms = time.Now().Sub(start).Milliseconds()
@ -401,7 +400,7 @@ func (s *Storage) uploadSession(task *Task) {
metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
// Upload session to s3
start := time.Now()
if err := s.s3.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", compression); err != nil {
if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDome = time.Now().Sub(start).Milliseconds()
@ -414,7 +413,7 @@ func (s *Storage) uploadSession(task *Task) {
metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
// Upload session to s3
start := time.Now()
if err := s.s3.Upload(task.dev, task.id+string(DEV), "application/octet-stream", compression); err != nil {
if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", compression); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDev = time.Now().Sub(start).Milliseconds()

View file

@ -1,41 +0,0 @@
package env
import (
"crypto/tls"
"log"
"net/http"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
_session "github.com/aws/aws-sdk-go/aws/session"
)
// AWSSessionOnRegion builds an AWS SDK session for the given region using
// static credentials read from the AWS_ACCESS_KEY_ID and
// AWS_SECRET_ACCESS_KEY environment variables (via this package's env
// helpers). When AWS_ENDPOINT is set, the session targets that
// S3-compatible endpoint with path-style addressing forced and SSL
// disabled; AWS_SKIP_SSL_VALIDATION additionally turns off certificate
// verification. The process exits fatally if the session cannot be created.
func AWSSessionOnRegion(region string) *_session.Session {
	AWS_ACCESS_KEY_ID := String("AWS_ACCESS_KEY_ID")
	AWS_SECRET_ACCESS_KEY := String("AWS_SECRET_ACCESS_KEY")
	config := &aws.Config{
		Region:      aws.String(region),
		Credentials: credentials.NewStaticCredentials(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, ""),
	}
	AWS_ENDPOINT := StringOptional("AWS_ENDPOINT")
	if AWS_ENDPOINT != "" {
		// NOTE(review): SSL is disabled for every custom endpoint, not only
		// when AWS_SKIP_SSL_VALIDATION is set — presumably for self-hosted
		// S3-compatible stores; confirm for TLS-enabled deployments.
		config.Endpoint = aws.String(AWS_ENDPOINT)
		config.DisableSSL = aws.Bool(true)
		config.S3ForcePathStyle = aws.Bool(true)
		AWS_SKIP_SSL_VALIDATION := Bool("AWS_SKIP_SSL_VALIDATION")
		if AWS_SKIP_SSL_VALIDATION {
			tr := &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			}
			client := &http.Client{Transport: tr}
			config.HTTPClient = client
		}
	}
	aws_session, err := _session.NewSession(config)
	if err != nil {
		log.Printf("AWS session error: %v\n", err)
		log.Fatal("AWS session error")
	}
	return aws_session
}

View file

@ -0,0 +1,21 @@
package objectstorage
import (
"io"
"time"
)
// CompressionType identifies the encoding a payload was compressed with
// before upload, so a backend can attach the matching Content-Encoding.
type CompressionType int

const (
	// NoCompression uploads the payload as-is.
	NoCompression CompressionType = iota
	// Gzip marks gzip-compressed payloads.
	Gzip
	// Brotli marks brotli-compressed payloads.
	Brotli
)
// ObjectStorage abstracts a blob store so services can read and write
// artifacts without depending on a particular cloud SDK.
type ObjectStorage interface {
	// Upload writes the reader's content under key with the given MIME
	// content type. compression only selects the encoding metadata; the
	// data must already be compressed accordingly.
	Upload(reader io.Reader, key string, contentType string, compression CompressionType) error
	// Get returns a reader over the object stored under key; the caller
	// must close it.
	Get(key string) (io.ReadCloser, error)
	// Exists reports whether an object can be resolved under key.
	Exists(key string) bool
	// GetCreationTime returns the object's modification timestamp, or nil
	// if the object cannot be found.
	GetCreationTime(key string) *time.Time
}

View file

@ -0,0 +1,166 @@
package s3
import (
"crypto/tls"
"fmt"
"io"
"net/http"
"net/url"
"os"
"sort"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/credentials"
_session "github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
objConfig "openreplay/backend/internal/config/objectstorage"
"openreplay/backend/pkg/objectstorage"
)
// MAX_RETURNING_COUNT caps how many keys GetFrequentlyUsedKeys returns.
const MAX_RETURNING_COUNT = 40

// storageImpl is the S3-backed implementation of objectstorage.ObjectStorage.
type storageImpl struct {
	uploader *s3manager.Uploader // streaming uploader built on the same session as svc
	svc      *s3.S3              // AWS Docs: "These clients are safe to use concurrently."
	bucket   *string             // bucket every operation targets (from ObjectsConfig.BucketName)
	fileTag  string              // URL-encoded tag set attached to uploads; see loadFileTag
}
// NewS3 builds an S3-backed ObjectStorage from the shared objects config.
// Region and static credentials come from the config's AWS* fields. When a
// custom AWSEndpoint is configured (e.g. an S3-compatible store), path-style
// addressing is forced and SSL is disabled; AWSSkipSSLValidation additionally
// turns off certificate verification. Returns an error when cfg is nil or
// the AWS session cannot be created.
func NewS3(cfg *objConfig.ObjectsConfig) (objectstorage.ObjectStorage, error) {
	if cfg == nil {
		return nil, fmt.Errorf("s3 config is nil")
	}
	config := &aws.Config{
		Region:      aws.String(cfg.AWSRegion),
		Credentials: credentials.NewStaticCredentials(cfg.AWSAccessKeyID, cfg.AWSSecretAccessKey, ""),
	}
	if cfg.AWSEndpoint != "" {
		config.Endpoint = aws.String(cfg.AWSEndpoint)
		// NOTE(review): DisableSSL is forced for every custom endpoint, not
		// only when AWSSkipSSLValidation is set — confirm this is intended
		// for TLS-enabled self-hosted stores.
		config.DisableSSL = aws.Bool(true)
		config.S3ForcePathStyle = aws.Bool(true)
		if cfg.AWSSkipSSLValidation {
			tr := &http.Transport{
				TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
			}
			client := &http.Client{Transport: tr}
			config.HTTPClient = client
		}
	}
	sess, err := _session.NewSession(config)
	if err != nil {
		return nil, fmt.Errorf("AWS session error: %v", err)
	}
	return &storageImpl{
		uploader: s3manager.NewUploader(sess),
		svc:      s3.New(sess), // AWS Docs: "These clients are safe to use concurrently."
		bucket:   &cfg.BucketName,
		fileTag:  loadFileTag(),
	}, nil
}
// tagging returns the pre-encoded tag set applied to every uploaded object.
func (s *storageImpl) tagging() *string {
	return &s.fileTag
}
// Upload streams the reader's content to the bucket under key with the given
// MIME content type. The object is stored with a long-lived immutable
// Cache-Control header and the configured retention tag. compression only
// selects the Content-Encoding header — the payload must already be
// compressed accordingly.
func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, compression objectstorage.CompressionType) error {
	var contentEncoding *string
	switch compression {
	case objectstorage.Gzip:
		enc := "gzip"
		contentEncoding = &enc
	case objectstorage.Brotli:
		enc := "br"
		contentEncoding = &enc
	}
	cacheControl := "max-age=2628000, immutable, private"
	input := &s3manager.UploadInput{
		Body:            reader,
		Bucket:          s.bucket,
		Key:             &key,
		ContentType:     &contentType,
		CacheControl:    &cacheControl,
		ContentEncoding: contentEncoding,
		Tagging:         s.tagging(),
	}
	_, err := s.uploader.Upload(input)
	return err
}
// Get fetches the object stored under key and returns its body stream.
// The caller is responsible for closing the returned reader.
func (s *storageImpl) Get(key string) (io.ReadCloser, error) {
	request := &s3.GetObjectInput{
		Bucket: s.bucket,
		Key:    &key,
	}
	result, err := s.svc.GetObject(request)
	if err != nil {
		return nil, err
	}
	return result.Body, nil
}
// Exists reports whether an object is stored under key, using a HeadObject
// request so the body is never transferred.
// NOTE(review): any HeadObject failure (including auth or network errors)
// is reported as "does not exist".
func (s *storageImpl) Exists(key string) bool {
	_, err := s.svc.HeadObject(&s3.HeadObjectInput{
		Bucket: s.bucket,
		Key:    &key,
	})
	// Idiomatic form of "return true iff the head request succeeded".
	return err == nil
}
// GetCreationTime returns the object's LastModified timestamp (the closest
// proxy S3 exposes for creation time), or nil when the object cannot be
// resolved.
func (s *storageImpl) GetCreationTime(key string) *time.Time {
	head, err := s.svc.HeadObject(&s3.HeadObjectInput{
		Bucket: s.bucket,
		Key:    &key,
	})
	if err != nil {
		return nil
	}
	return head.LastModified
}
// GetFrequentlyUsedKeys lists objects under the "<projectID>/" prefix and
// returns at most MAX_RETURNING_COUNT keys with the prefix stripped. When
// the listing exceeds the limit, entries are ordered newest-first before
// truncation; otherwise the bucket's listing order is kept as-is.
func (s *storageImpl) GetFrequentlyUsedKeys(projectID uint64) ([]string, error) {
	prefix := strconv.FormatUint(projectID, 10) + "/"
	output, err := s.svc.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket: s.bucket,
		Prefix: &prefix,
	})
	if err != nil {
		return nil, err
	}
	// NOTE: only the first page of results is inspected (no pagination).
	objects := output.Contents
	limit := len(objects)
	if limit > MAX_RETURNING_COUNT {
		// Keep the most recently modified objects when truncating.
		sort.Slice(objects, func(i, j int) bool {
			return objects[i].LastModified.After(*(objects[j].LastModified))
		})
		limit = MAX_RETURNING_COUNT
	}
	var keyList []string
	cut := len(prefix)
	for _, obj := range objects[:limit] {
		keyList = append(keyList, (*obj.Key)[cut:])
	}
	return keyList, nil
}
func loadFileTag() string {
// Load file tag from env
key := "retention"
value := os.Getenv("RETENTION")
if value == "" {
value = "default"
}
// Create URL encoded tag set for file
params := url.Values{}
params.Add(key, value)
return params.Encode()
}

View file

@ -0,0 +1,15 @@
package store
import (
"errors"
objConfig "openreplay/backend/internal/config/objectstorage"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/objectstorage/s3"
)
// NewStore builds the object storage client for the OSS build, which
// supports only the S3-compatible backend. Returns an error when the
// config is missing.
func NewStore(cfg *objConfig.ObjectsConfig) (objectstorage.ObjectStorage, error) {
	if cfg == nil {
		return nil, errors.New("object storage config is empty")
	}
	return s3.NewS3(cfg)
}

View file

@ -1,149 +0,0 @@
package storage
import (
"io"
"net/url"
"os"
"sort"
"strconv"
"time"
_s3 "github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"openreplay/backend/pkg/env"
)
type S3 struct {
uploader *s3manager.Uploader
svc *_s3.S3
bucket *string
fileTag string
useTags bool
}
func NewS3(region string, bucket string, useTags bool) *S3 {
sess := env.AWSSessionOnRegion(region)
return &S3{
uploader: s3manager.NewUploader(sess),
svc: _s3.New(sess), // AWS Docs: "These clients are safe to use concurrently."
bucket: &bucket,
fileTag: loadFileTag(),
useTags: useTags,
}
}
type CompressionType int
const (
NoCompression CompressionType = iota
Gzip
Brotli
)
func (s3 *S3) tagging() *string {
if s3.useTags {
return &s3.fileTag
}
return nil
}
func (s3 *S3) Upload(reader io.Reader, key string, contentType string, compression CompressionType) error {
cacheControl := "max-age=2628000, immutable, private"
var contentEncoding *string
switch compression {
case Gzip:
gzipStr := "gzip"
contentEncoding = &gzipStr
case Brotli:
gzipStr := "br"
contentEncoding = &gzipStr
}
_, err := s3.uploader.Upload(&s3manager.UploadInput{
Body: reader,
Bucket: s3.bucket,
Key: &key,
ContentType: &contentType,
CacheControl: &cacheControl,
ContentEncoding: contentEncoding,
Tagging: s3.tagging(),
})
return err
}
func (s3 *S3) Get(key string) (io.ReadCloser, error) {
out, err := s3.svc.GetObject(&_s3.GetObjectInput{
Bucket: s3.bucket,
Key: &key,
})
if err != nil {
return nil, err
}
return out.Body, nil
}
func (s3 *S3) Exists(key string) bool {
_, err := s3.svc.HeadObject(&_s3.HeadObjectInput{
Bucket: s3.bucket,
Key: &key,
})
if err == nil {
return true
}
return false
}
func (s3 *S3) GetCreationTime(key string) *time.Time {
ans, err := s3.svc.HeadObject(&_s3.HeadObjectInput{
Bucket: s3.bucket,
Key: &key,
})
if err != nil {
return nil
}
return ans.LastModified
}
const MAX_RETURNING_COUNT = 40
func (s3 *S3) GetFrequentlyUsedKeys(projectID uint64) ([]string, error) {
prefix := strconv.FormatUint(projectID, 10) + "/"
output, err := s3.svc.ListObjectsV2(&_s3.ListObjectsV2Input{
Bucket: s3.bucket,
Prefix: &prefix,
})
if err != nil {
return nil, err
}
//pagination may be here
list := output.Contents
max := len(list)
if max > MAX_RETURNING_COUNT {
max = MAX_RETURNING_COUNT
sort.Slice(list, func(i, j int) bool {
return list[i].LastModified.After(*(list[j].LastModified))
})
}
var keyList []string
s := len(prefix)
for _, obj := range list[:max] {
keyList = append(keyList, (*obj.Key)[s:])
}
return keyList, nil
}
func loadFileTag() string {
// Load file tag from env
key := "retention"
value := os.Getenv("RETENTION")
if value == "" {
value = "default"
}
// Create URL encoded tag set for file
params := url.Values{}
params.Add(key, value)
return params.Encode()
}

4
ee/api/.gitignore vendored
View file

@ -239,11 +239,13 @@ Pipfile.lock
/chalicelib/utils/jira_client.py
/chalicelib/utils/metrics_helper.py
/chalicelib/utils/pg_client.py
/chalicelib/utils/s3.py
/chalicelib/utils/smtp.py
/chalicelib/utils/sql_helper.py
/chalicelib/utils/strings.py
/chalicelib/utils/TimeUTC.py
/chalicelib/utils/storage/generators.py
/chalicelib/utils/storage/interface.py
/chalicelib/utils/storage/s3.py
/routers/app/__init__.py
/crons/__init__.py
/routers/subs/__init__.py

View file

@ -4,8 +4,10 @@ from decouple import config
import schemas
import schemas_ee
from chalicelib.utils import s3, pg_client, helper, s3_extra
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.storage import StorageClient
from chalicelib.utils.objects import extra
def generate_file_key(project_id, key):
@ -14,12 +16,12 @@ def generate_file_key(project_id, key):
def presign_record(project_id, data: schemas_ee.AssistRecordPayloadSchema, context: schemas_ee.CurrentContext):
key = generate_file_key(project_id=project_id, key=f"{TimeUTC.now()}-{data.name}")
presigned_url = s3.get_presigned_url_for_upload(bucket=config('ASSIST_RECORDS_BUCKET'), expires_in=1800, key=key)
presigned_url = StorageClient.get_presigned_url_for_upload(bucket=config('ASSIST_RECORDS_BUCKET'), expires_in=1800, key=key)
return {"URL": presigned_url, "key": key}
def save_record(project_id, data: schemas_ee.AssistRecordSavePayloadSchema, context: schemas_ee.CurrentContext):
s3_extra.tag_record(file_key=data.key, tag_value=config('RETENTION_L_VALUE', default='vault'))
extra.tag_record(file_key=data.key, tag_value=config('RETENTION_L_VALUE', default='vault'))
params = {"user_id": context.user_id, "project_id": project_id, **data.dict()}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
@ -30,7 +32,7 @@ def save_record(project_id, data: schemas_ee.AssistRecordSavePayloadSchema, cont
params)
cur.execute(query)
result = helper.dict_to_camel_case(cur.fetchone())
result["URL"] = s3.client.generate_presigned_url(
result["URL"] = StorageClient.generate_presigned_url(
'get_object',
Params={'Bucket': config("ASSIST_RECORDS_BUCKET"), 'Key': result.pop("fileKey")},
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
@ -101,7 +103,7 @@ def get_record(project_id, record_id, context: schemas_ee.CurrentContext):
cur.execute(query)
result = helper.dict_to_camel_case(cur.fetchone())
if result:
result["URL"] = s3.client.generate_presigned_url(
result["URL"] = StorageClient.generate_presigned_url(
'get_object',
Params={'Bucket': config("ASSIST_RECORDS_BUCKET"), 'Key': result.pop("fileKey")},
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
@ -128,7 +130,7 @@ def update_record(project_id, record_id, data: schemas_ee.AssistRecordUpdatePayl
result = helper.dict_to_camel_case(cur.fetchone())
if not result:
return {"errors": ["record not found"]}
result["URL"] = s3.client.generate_presigned_url(
result["URL"] = StorageClient.generate_presigned_url(
'get_object',
Params={'Bucket': config("ASSIST_RECORDS_BUCKET"), 'Key': result.pop("fileKey")},
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
@ -148,5 +150,5 @@ def delete_record(project_id, record_id, context: schemas_ee.CurrentContext):
result = helper.dict_to_camel_case(cur.fetchone())
if not result:
return {"errors": ["record not found"]}
s3_extra.tag_record(file_key=result["fileKey"], tag_value=config('RETENTION_D_VALUE', default='default'))
extra.tag_record(file_key=result["fileKey"], tag_value=config('RETENTION_D_VALUE', default='default'))
return {"state": "success"}

View file

@ -7,8 +7,9 @@ from fastapi import HTTPException, status
import schemas
import schemas_ee
from chalicelib.core import funnels, issues, metrics, click_maps, sessions_insights, sessions_mobs, sessions_favorite
from chalicelib.utils import helper, pg_client, s3_extra, s3
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.storage import StorageClient, extra
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
print(">>> Using experimental error search")
@ -291,7 +292,7 @@ def create(project_id, user_id, data: schemas_ee.CardSchema, dashboard=False):
tag = config('RETENTION_L_VALUE', default='vault')
for k in keys:
try:
s3_extra.tag_session(file_key=k, tag_value=tag)
extra.tag_session(file_key=k, tag_value=tag)
except Exception as e:
print(f"!!!Error while tagging: {k} to {tag} for clickMap")
print(str(e))
@ -499,7 +500,7 @@ def delete(project_id, metric_id, user_id):
tag = config('RETENTION_D_VALUE', default='default')
for k in keys:
try:
s3_extra.tag_session(file_key=k, tag_value=tag)
extra.tag_session(file_key=k, tag_value=tag)
except Exception as e:
print(f"!!!Error while tagging: {k} to {tag} for clickMap")
print(str(e))
@ -640,7 +641,7 @@ def make_chart_from_card(project_id, user_id, metric_id, data: schemas.CardChart
__get_mob_keys(project_id=project_id, session_id=raw_metric["data"]["sessionId"])
mob_exists = False
for k in keys:
if s3.exists(bucket=config("sessions_bucket"), key=k):
if StorageClient.exists(bucket=config("sessions_bucket"), key=k):
mob_exists = True
break
if mob_exists:

View file

@ -3,7 +3,7 @@ from fastapi.security import SecurityScopes
import schemas_ee
from chalicelib.core import permissions
from chalicelib.utils import s3
from chalicelib.utils.storage import StorageClient
SCOPES = SecurityScopes([schemas_ee.Permissions.dev_tools])
@ -23,12 +23,12 @@ def get_urls(session_id, project_id, context: schemas_ee.CurrentContext, check_e
return []
results = []
for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
if check_existence and not s3.exists(bucket=config("sessions_bucket"), key=k):
if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
continue
results.append(s3.client.generate_presigned_url(
'get_object',
Params={'Bucket': config("sessions_bucket"), 'Key': k},
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
results.append(StorageClient.get_presigned_url_for_sharing(
bucket=config("sessions_bucket"),
expires_in=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900),
key=k
))
return results
@ -36,4 +36,4 @@ def get_urls(session_id, project_id, context: schemas_ee.CurrentContext, check_e
def delete_mobs(project_id, session_ids):
for session_id in session_ids:
for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
s3.tag_for_deletion(bucket=config("sessions_bucket"), key=k)
StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)

View file

@ -2,7 +2,8 @@ from decouple import config
import schemas_ee
from chalicelib.core import sessions, sessions_favorite_exp, sessions_mobs, sessions_devtool
from chalicelib.utils import pg_client, s3_extra
from chalicelib.utils import pg_client
from chalicelib.utils.objects import extra
def add_favorite_session(context: schemas_ee.CurrentContext, project_id, session_id):
@ -48,7 +49,7 @@ def favorite_session(context: schemas_ee.CurrentContext, project_id, session_id)
for k in keys:
try:
s3_extra.tag_session(file_key=k, tag_value=tag)
extra.tag_session(file_key=k, tag_value=tag)
except Exception as e:
print(f"!!!Error while tagging: {k} to {tag} for removal")
print(str(e))
@ -59,7 +60,7 @@ def favorite_session(context: schemas_ee.CurrentContext, project_id, session_id)
for k in keys:
try:
s3_extra.tag_session(file_key=k, tag_value=tag)
extra.tag_session(file_key=k, tag_value=tag)
except Exception as e:
print(f"!!!Error while tagging: {k} to {tag} for vault")
print(str(e))

View file

@ -1,17 +0,0 @@
from decouple import config
from chalicelib.utils import s3
def tag_session(file_key, tag_key='retention', tag_value='vault'):
bucket = config("sessions_bucket")
if not s3.exists(bucket=bucket, key=file_key):
return None
return s3.tag_file(file_key=file_key, bucket=bucket, tag_key=tag_key, tag_value=tag_value)
def tag_record(file_key, tag_key='retention', tag_value='vault'):
bucket = config('ASSIST_RECORDS_BUCKET')
if not s3.exists(bucket=bucket, key=file_key):
return None
return s3.tag_file(file_key=file_key, bucket=bucket, tag_key=tag_key, tag_value=tag_value)

View file

@ -0,0 +1,10 @@
from decouple import config
from .azure_blob import AzureBlobStorage
from .s3 import AmazonS3Storage
# Init global object storage client
# Initialize a single, process-wide object storage client at import time.
# The CLOUD setting selects the backend: "azure" uses Azure Blob Storage,
# anything else falls back to Amazon S3. Without a default, decouple's
# config() raises UndefinedValueError on import for deployments that never
# set CLOUD, so default to "aws" to keep the S3 path working.
if config("CLOUD", default="aws") == "azure":
    StorageClient = AzureBlobStorage()
else:
    StorageClient = AmazonS3Storage()

View file

@ -0,0 +1,55 @@
from decouple import config
from datetime import datetime, timedelta
from chalicelib.utils.objects.interface import ObjectStorage
from azure.storage.blob import BlobServiceClient, BlobSasPermissions, generate_blob_sas
class AzureBlobStorage(ObjectStorage):
    """Azure Blob Storage implementation of the ObjectStorage interface.

    The interface's "bucket" maps to an Azure blob container and "key" to a
    blob name. Account credentials are read from AZURE_ACCOUNT_NAME /
    AZURE_ACCOUNT_KEY once, when the class body is evaluated.
    """
    # Shared service client, created once at class-definition (import) time.
    client = BlobServiceClient(
        account_url=f"https://{config('AZURE_ACCOUNT_NAME')}.blob.core.windows.net",
        credential=config("AZURE_ACCOUNT_KEY"),
    )

    @staticmethod
    def _blob_url(bucket, key):
        # Plain (unsigned) URL of a blob.
        return f"https://{config('AZURE_ACCOUNT_NAME')}.blob.core.windows.net/{bucket}/{key}"

    @classmethod
    def _sas_url(cls, bucket, key, permission, expires_in):
        # Build a SAS-signed URL valid for expires_in seconds.
        blob_sas = generate_blob_sas(account_name=config("AZURE_ACCOUNT_NAME"),
                                     container_name=bucket,
                                     blob_name=key,
                                     account_key=config("AZURE_ACCOUNT_KEY"),
                                     permission=permission,
                                     expiry=datetime.utcnow() + timedelta(seconds=expires_in))
        return f"{cls._blob_url(bucket, key)}?{blob_sas}"

    def exists(self, bucket, key):
        """Return True when the blob exists in the container."""
        return self.client.get_blob_client(bucket, key).exists()

    def get_presigned_url_for_sharing(self, bucket, expires_in, key, check_exists=False):
        """Read-only SAS URL for a blob; None when check_exists is set and
        the blob is missing."""
        if check_exists and not self.client.get_blob_client(bucket, key).exists():
            return None
        return self._sas_url(bucket, key, BlobSasPermissions(read=True), expires_in)

    def get_presigned_url_for_upload(self, bucket, expires_in, key, **args):
        """Write-only SAS URL allowing a client to upload the blob directly."""
        return self._sas_url(bucket, key, BlobSasPermissions(write=True), expires_in)

    def get_file(self, source_bucket, source_key):
        """Download and return the blob's full content as bytes."""
        blob_client = self.client.get_blob_client(source_bucket, source_key)
        return blob_client.download_blob().readall()

    def tag_file(self, file_key, bucket, tag_key, tag_value):
        """Set (or overwrite) a single tag on an existing blob.

        Added so callers that invoke StorageClient.tag_file (e.g. the
        tag_session/tag_record helpers) work on Azure as well as S3.
        """
        blob_client = self.client.get_blob_client(bucket, file_key)
        tags = blob_client.get_blob_tags() or {}
        tags[tag_key] = tag_value
        return blob_client.set_blob_tags(tags)

    def tag_for_deletion(self, bucket, key):
        """Mark a blob for deletion in SCH_DELETE_DAYS days; False when the
        blob does not exist."""
        blob_client = self.client.get_blob_client(bucket, key)
        if not blob_client.exists():
            return False
        # Guard: untagged blobs may yield an empty/None tag mapping.
        blob_tags = blob_client.get_blob_tags() or {}
        # Synchronous self-copy refreshes the blob's last-modified time so
        # the deletion countdown starts from now — NOTE(review): confirm
        # this is the intended effect of the copy.
        blob_client.start_copy_from_url(
            source_url=self._blob_url(bucket, key),
            requires_sync=True,
        )
        blob_tags["to_delete_in_days"] = config("SCH_DELETE_DAYS", default='7')
        blob_client.set_blob_tags(blob_tags)

View file

@ -0,0 +1,16 @@
from decouple import config
from chalicelib.utils.storage import StorageClient
def tag_session(file_key, tag_key='retention', tag_value='vault'):
    """Apply a retention tag to a session file in the sessions bucket.

    Returns the storage client's tagging result, or None when the file
    does not exist.
    """
    bucket = config("sessions_bucket")
    if StorageClient.exists(bucket=bucket, key=file_key):
        return StorageClient.tag_file(file_key=file_key, bucket=bucket,
                                      tag_key=tag_key, tag_value=tag_value)
    return None
def tag_record(file_key, tag_key='retention', tag_value='vault'):
    """Apply a retention tag to an assist recording.

    Returns the storage client's tagging result, or None when the file
    does not exist in the assist-records bucket.
    """
    bucket = config('ASSIST_RECORDS_BUCKET')
    if StorageClient.exists(bucket=bucket, key=file_key):
        return StorageClient.tag_file(file_key=file_key, bucket=bucket,
                                      tag_key=tag_key, tag_value=tag_value)
    return None

View file

@ -59,11 +59,13 @@ rm -rf ./chalicelib/utils/helper.py
rm -rf ./chalicelib/utils/jira_client.py
rm -rf ./chalicelib/utils/metrics_helper.py
rm -rf ./chalicelib/utils/pg_client.py
rm -rf ./chalicelib/utils/s3.py
rm -rf ./chalicelib/utils/smtp.py
rm -rf ./chalicelib/utils/sql_helper.py
rm -rf ./chalicelib/utils/strings.py
rm -rf ./chalicelib/utils/TimeUTC.py
rm -rf ./chalicelib/utils/storage/generators.py
rm -rf ./chalicelib/utils/storage/interface.py
rm -rf ./chalicelib/utils/storage/s3.py
rm -rf ./routers/app/__init__.py
rm -rf ./crons/__init__.py
rm -rf ./routers/subs/__init__.py

View file

@ -16,4 +16,5 @@ apscheduler==3.10.1
clickhouse-driver==0.2.5
clickhouse-driver[lz4]==0.2.5
python-multipart==0.0.5
python-multipart==0.0.5
azure-storage-blob==12.16.0

View file

@ -15,3 +15,4 @@ apscheduler==3.10.1
clickhouse-driver==0.2.5
clickhouse-driver[lz4]==0.2.5
redis==4.5.5
azure-storage-blob==12.16.0

View file

@ -24,3 +24,4 @@ python-multipart==0.0.6
redis==4.5.5
#confluent-kafka==2.1.0
azure-storage-blob==12.16.0

View file

@ -0,0 +1,122 @@
package azure
import (
"bytes"
"context"
"fmt"
"io"
"log"
"os"
"strings"
"time"
config "openreplay/backend/internal/config/objectstorage"
"openreplay/backend/pkg/objectstorage"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)
// storageImpl is the Azure Blob Storage backend of objectstorage.ObjectStorage.
type storageImpl struct {
	client    *azblob.Client    // shared-key azblob client
	container string            // target container (BucketName from config)
	tags      map[string]string // retention tags applied to every upload
}
// NewStorage builds an Azure Blob Storage client from the shared-key
// credentials in cfg and returns it behind the ObjectStorage interface.
func NewStorage(cfg *config.ObjectsConfig) (objectstorage.ObjectStorage, error) {
	if cfg == nil {
		return nil, fmt.Errorf("azure config is empty")
	}
	cred, err := azblob.NewSharedKeyCredential(cfg.AzureAccountName, cfg.AzureAccountKey)
	if err != nil {
		return nil, fmt.Errorf("cannot create azure credential: %v", err)
	}
	serviceURL := fmt.Sprintf("https://%s.blob.core.windows.net/", cfg.AzureAccountName)
	client, err := azblob.NewClientWithSharedKeyCredential(serviceURL, cred, nil)
	if err != nil {
		return nil, fmt.Errorf("cannot create azure client: %v", err)
	}
	store := &storageImpl{
		client:    client,
		container: cfg.BucketName,
		tags:      loadFileTag(),
	}
	return store, nil
}
// Upload streams reader into the container under key, setting cache and
// content headers plus the retention tags. A leading "/" is stripped from
// the key because it would otherwise create an empty-named folder.
func (s *storageImpl) Upload(reader io.Reader, key string, contentType string, compression objectstorage.CompressionType) error {
	cacheControl := "max-age=2628000, immutable, private"
	var contentEncoding *string
	if compression == objectstorage.Gzip {
		enc := "gzip"
		contentEncoding = &enc
	} else if compression == objectstorage.Brotli {
		enc := "br"
		contentEncoding = &enc
	}
	key = strings.TrimPrefix(key, "/")
	opts := &azblob.UploadStreamOptions{
		HTTPHeaders: &blob.HTTPHeaders{
			BlobCacheControl:    &cacheControl,
			BlobContentEncoding: contentEncoding,
			BlobContentType:     &contentType,
		},
		Tags: s.tags,
	}
	_, err := s.client.UploadStream(context.Background(), s.container, key, reader, opts)
	return err
}
// Get downloads the whole blob into memory and returns it as a ReadCloser.
// NOTE: the entire blob is buffered before returning, so very large blobs
// cost memory proportional to their size.
func (s *storageImpl) Get(key string) (io.ReadCloser, error) {
	ctx := context.Background()
	resp, err := s.client.DownloadStream(ctx, s.container, key, nil)
	if err != nil {
		return nil, err
	}
	reader := resp.NewRetryReader(ctx, &azblob.RetryReaderOptions{})
	downloadedData := bytes.Buffer{}
	if _, err := downloadedData.ReadFrom(reader); err != nil {
		// Fix: the original returned here without closing the retry reader,
		// leaking the underlying connection on partial-read failures.
		reader.Close()
		return nil, err
	}
	if err := reader.Close(); err != nil {
		return nil, err
	}
	return io.NopCloser(bytes.NewReader(downloadedData.Bytes())), nil
}
// Exists reports whether the blob can be fetched from the container.
// NOTE: this issues a download request and discards the body; a blob
// properties (HEAD) request would avoid transferring any data.
func (s *storageImpl) Exists(key string) bool {
	resp, err := s.client.DownloadStream(context.Background(), s.container, key, nil)
	if err != nil {
		return false
	}
	if closeErr := resp.Body.Close(); closeErr != nil {
		log.Println(closeErr)
	}
	return true
}
// GetCreationTime returns the blob's LastModified timestamp (kept for
// parity with the S3 backend, which also reports last-modified rather
// than a true creation time), or nil when the blob cannot be reached.
// Fix: the original issued a full download just to read a header; a blob
// properties (HEAD) request retrieves the same metadata without the body.
func (s *storageImpl) GetCreationTime(key string) *time.Time {
	blobClient := s.client.ServiceClient().NewContainerClient(s.container).NewBlobClient(key)
	props, err := blobClient.GetProperties(context.Background(), nil)
	if err != nil {
		return nil
	}
	return props.LastModified
}
func loadFileTag() map[string]string {
// Load file tag from env
key := "retention"
value := os.Getenv("RETENTION")
if value == "" {
value = "default"
}
return map[string]string{key: value}
}

View file

@ -0,0 +1,19 @@
package store
import (
"errors"
objConfig "openreplay/backend/internal/config/objectstorage"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/objectstorage/azure"
"openreplay/backend/pkg/objectstorage/s3"
)
// NewStore selects the object storage backend for the EE build: Azure Blob
// Storage when CloudName is "azure", otherwise the S3-compatible client.
func NewStore(cfg *objConfig.ObjectsConfig) (objectstorage.ObjectStorage, error) {
	if cfg == nil {
		return nil, errors.New("object storage config is empty")
	}
	switch cfg.CloudName {
	case "azure":
		return azure.NewStorage(cfg)
	default:
		return s3.NewS3(cfg)
	}
}

View file

@ -75,9 +75,7 @@ resources:
requests:
cpu: 1
memory: 4Gi
limits:
cpu: 2
memory: 8Gi
limits: {}
nodeSelector: {}

View file

@ -69,7 +69,7 @@ spec:
{{- else }}
value: {{ .Values.global.s3.secretKey }}
{{- end }}
- name: S3_BUCKET_ASSETS
- name: BUCKET_NAME
value: {{ .Values.global.s3.assetsBucket }}
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'

View file

@ -88,9 +88,6 @@ autoscaling:
# targetMemoryUtilizationPercentage: 80
env:
TOKEN_SECRET: secret_token_string # TODO: generate on build
S3_BUCKET_IOS_IMAGES: sessions-mobile-assets
CACHE_ASSETS: true
HTTP_PORT: 80

View file

@ -88,26 +88,6 @@ spec:
{{- end}}
- name: POSTGRES_STRING
value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}'
# We need to check what is the object store endpoint.
# There can be 4 options
# 1. Using minio inside kube clster
# 2. Using minio managed external cluster, like aws minio offering
# 3. Using GCP or other object stores compatible with s3 apis
# 4. Using AWS itself.
# AWS uses bucketname.endpoint/object while others use endpoint/bucketname/object
- name: ASSETS_ORIGIN
{{- if contains "minio" .Values.global.s3.endpoint }}
# Local minio Installation
value: '{{ ternary "https" "http" .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}/{{.Values.global.s3.assetsBucket}}'
{{- else if contains "amazonaws.com" .Values.global.s3.endpoint }}
# AWS S3
# Ref: https://stackoverflow.com/questions/53634583/go-template-split-string-by-delimiter
# We need https://bucketname.s3endpoint
value: {{ (split "://" .Values.global.s3.endpoint)._0 }}://{{.Values.global.s3.assetsBucket}}.{{ (split "://" .Values.global.s3.endpoint)._1 }}
{{- else }}
# S3 compatible storage
value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}'
{{- end }}
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
ports:
{{- range $key, $val := .Values.service.ports }}

View file

@ -101,7 +101,6 @@ autoscaling:
env:
TOKEN_SECRET: secret_token_string # TODO: generate on build
S3_BUCKET_IOS_IMAGES: sessions-mobile-assets
CACHE_ASSETS: true

View file

@ -63,13 +63,9 @@ spec:
{{- end }}
- name: AWS_ENDPOINT
value: '{{ .Values.global.s3.endpoint }}'
- name: AWS_REGION_WEB
- name: AWS_REGION
value: '{{ .Values.global.s3.region }}'
- name: AWS_REGION_IOS
value: '{{ .Values.global.s3.region }}'
- name: S3_BUCKET_WEB
value: {{ .Values.global.s3.recordingsBucket }}
- name: S3_BUCKET_IOS
- name: BUCKET_NAME
value: {{ .Values.global.s3.recordingsBucket }}
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'

View file

@ -29,6 +29,10 @@ spec:
image: "{{ tpl .Values.report.image.repository . }}:{{ .Values.report.image.tag | default .Chart.AppVersion }}-ee"
imagePullPolicy: "{{ .Values.report.image.pullPolicy}}"
env:
{{- range $key, $val := .Values.global.env }}
- name: {{ $key }}
value: '{{ $val }}'
{{- end }}
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: version_number

View file

@ -29,6 +29,10 @@ spec:
image: "{{ tpl .Values.sessionsCleaner.image.repository . }}:{{ .Values.sessionsCleaner.image.tag | default .Chart.AppVersion }}-ee"
imagePullPolicy: {{ .Values.sessionsCleaner.image.pullPolicy }}
env:
{{- range $key, $val := .Values.global.env }}
- name: {{ $key }}
value: '{{ $val }}'
{{- end }}
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: version_number

View file

@ -29,6 +29,10 @@ spec:
image: "{{ tpl .Values.telemetry.image.repository . }}:{{ .Values.telemetry.image.tag | default .Chart.AppVersion }}-ee"
imagePullPolicy: {{ .Values.telemetry.image.pullPolicy }}
env:
{{- range $key, $val := .Values.global.env }}
- name: {{ $key }}
value: '{{ $val }}'
{{- end }}
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: version_number