* feat(backend): implemented azure blob storage support
* feat(backend): added azure implementation to backend services
* feat(backend): added azure blob storage support to chalice service
* fix(backend): removed previous version of s3
* feat(backend): moved azure support to ee subfolder
* feat(backend): prepared ee code for new utils.objects package
* feat(backend): added missing modules to go.mod
* feat(backend): added missing modules to go.sum
* feat(backend): go mod tidy
* feat(backend): temporarily made s3 vars not required
* feat(backend): added azure lib to ee chalice
* feat(api): changed azure env var name
* feat(api): added new object store and extra methods to chalice ee
* fix(api): added azure blob lib to alerts
* fix(api): fixed incorrect call in sessions_devtool
* fix(crons): added azure blob storage library to requirements list
* chore(build): added error message when the flag is not provided.
  Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* feat(backend): removed ios headers and object store for ios messages
* feat(backend): object config refactoring
* chore(helm): Update BUCKET_NAME
* fix(backend): removed object storage usage in http
* feat(backend): added debug logs to azure upload method
* fix(backend): fixed empty folder issue
* fix(backend): removed extra debug log
* chore(helm): Adding global variables for crons
* chore(helm): Remove clickhouse resource limit
  Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
* fix(backend): removed assets debug log
* feat(api): use ABC class instead of empty interface
* feat(api): renamed helpers to generators
* feat(api): changed prep/clean dev scripts
* feat(api): changed name obj_store -> StorageClient
* feat(api): some changes after code review
* fix(api): removed unnecessary packages in oss api
* feat(backend): moved azure implementation to ee folder

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
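The commits above put object storage behind a single abstraction (obj_store renamed to StorageClient on the API side, and a Go objectstorage package with an S3 implementation plus an EE-only Azure Blob Storage implementation on the backend side). As a rough illustration only, a minimal sketch of how such a config-driven factory could look; the interface, Config fields, and provider names here are assumptions for illustration, not the actual OpenReplay types:

// Hypothetical sketch, not the real OpenReplay API: shows one ObjectStorage
// interface hiding S3 vs. Azure Blob Storage behind a config-driven factory,
// mirroring the role of store.NewStore(&cfg.ObjectsConfig) in the file below.
package objectstorage

import (
	"fmt"
	"io"
)

// ObjectStorage is an assumed minimal interface; the real one in
// pkg/objectstorage may expose different methods.
type ObjectStorage interface {
	Upload(reader io.Reader, key string, contentType string) error
	Get(key string) (io.ReadCloser, error)
}

// Config is an assumed shape for ObjectsConfig.
type Config struct {
	Provider        string // "s3" or "azure" (assumption)
	BucketName      string
	AzureAccount    string
	AzureAccountKey string
}

// NewStore picks a concrete backend from the config.
func NewStore(cfg *Config) (ObjectStorage, error) {
	switch cfg.Provider {
	case "azure":
		return newAzureStorage(cfg) // EE-only in the real tree, per the commits above
	case "s3":
		return newS3Storage(cfg)
	default:
		return nil, fmt.Errorf("unknown object storage provider: %q", cfg.Provider)
	}
}

func newAzureStorage(cfg *Config) (ObjectStorage, error) {
	// An azblob-backed client would be constructed here in the EE build.
	return nil, fmt.Errorf("azure backend not implemented in this sketch")
}

func newS3Storage(cfg *Config) (ObjectStorage, error) {
	// An aws-sdk-backed client would be constructed here.
	return nil, fmt.Errorf("s3 backend not implemented in this sketch")
}

With this shape, consumers such as the assets service below depend only on the interface returned by the factory, so swapping S3 for Azure Blob Storage requires no changes in the consuming code.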
91 lines · 2.2 KiB · Go
package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"

	"openreplay/backend/internal/assets"
	"openreplay/backend/internal/assets/cacher"
	config "openreplay/backend/internal/config/assets"
	"openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/metrics"
	assetsMetrics "openreplay/backend/pkg/metrics/assets"
	"openreplay/backend/pkg/objectstorage/store"
	"openreplay/backend/pkg/queue"
)

func main() {
	// Register assets-specific metrics with the metrics registry.
	m := metrics.New()
	m.Register(assetsMetrics.List())

	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

	cfg := config.New()

	// Create the object storage client used to upload cached assets;
	// the concrete backend is selected inside pkg/objectstorage/store.
	objStore, err := store.NewStore(&cfg.ObjectsConfig)
	if err != nil {
		log.Fatalf("Error on object storage creation: %v", err)
	}
	cacher, err := cacher.NewCacher(cfg, objStore)
	if err != nil {
		log.Fatalf("Error on cacher creation: %v", err)
	}

	// Cache session assets and the source files referenced by JS exceptions.
	msgHandler := func(msg messages.Message) {
		switch m := msg.(type) {
		case *messages.AssetCache:
			cacher.CacheURL(m.SessionID(), m.URL)
			assetsMetrics.IncreaseProcessesSessions()
		// TODO: connect to "raw" topic in order to listen for JSException
		case *messages.JSException:
			sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
			if err != nil {
				log.Printf("Error on source extraction: %v", err)
				return
			}
			for _, source := range sourceList {
				cacher.CacheJSFile(source)
			}
		}
	}

	// Consume asset cache messages from the cache topic.
	msgConsumer := queue.NewConsumer(
		cfg.GroupCache,
		[]string{cfg.TopicCache},
		messages.NewMessageIterator(msgHandler, []int{messages.MsgAssetCache, messages.MsgJSException}, true),
		true,
		cfg.MessageSizeLimit,
	)

	log.Printf("Cacher service started\n")

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	// Main loop: handle shutdown signals, cacher errors, periodic timeout
	// refreshes, consumer rebalances, and message consumption.
	tick := time.Tick(20 * time.Minute)
	for {
		select {
		case sig := <-sigchan:
			log.Printf("Caught signal %v: terminating\n", sig)
			cacher.Stop()
			msgConsumer.Close()
			os.Exit(0)
		case err := <-cacher.Errors:
			log.Printf("Error while caching: %v", err)
		case <-tick:
			cacher.UpdateTimeouts()
		case msg := <-msgConsumer.Rebalanced():
			log.Println(msg)
		default:
			if !cacher.CanCache() {
				continue
			}
			if err := msgConsumer.ConsumeNext(); err != nil {
				log.Fatalf("Error on consumption: %v", err)
			}
		}
	}
}