* feat(backend): implemented Azure Blob Storage support
* feat(backend): added Azure implementation to backend services
* feat(backend): added Azure Blob Storage support to chalice service
* fix(backend): removed previous version of S3
* feat(backend): moved Azure support to ee subfolder
* feat(backend): prepared ee code for new utils.objects package
* feat(backend): added missing modules to go.mod
* feat(backend): added missing modules to go.sum
* feat(backend): go mod tidy
* feat(backend): temporarily made S3 vars not required
* feat(backend): added Azure lib to ee chalice
* feat(api): changed Azure env var name
* feat(api): added new object store and extra methods to chalice ee
* fix(api): added Azure blob lib to alerts
* fix(api): fixed incorrect call in sessions_devtool
* fix(crons): added Azure Blob Storage library to requirements list
* chore(build): error message for not providing flag

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(backend): removed iOS headers and object store for iOS messages
* feat(backend): object config refactoring
* chore(helm): update BUCKET_NAME
* fix(backend): removed object storage usage in http
* feat(backend): added debug logs to Azure upload method
* fix(backend): fixed empty folder issue
* fix(backend): removed extra debug log
* chore(helm): adding global variables for crons
* chore(helm): remove ClickHouse resource limit

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(backend): removed assets debug log
* feat(api): use ABC class instead of empty interface
* feat(api): renamed helpers to generators
* feat(api): changed prep/clean dev scripts
* feat(api): renamed obj_store -> StorageClient
* feat(api): some changes after code review
* fix(api): removed unnecessary packages in oss api
* feat(backend): moved Azure implementation to ee folder

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
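The commits above replace direct S3 usage with a pluggable object-storage abstraction, with the Azure Blob Storage implementation moved under the ee folder. As a rough illustration of the shape such an abstraction could take, here is a minimal Go sketch; the ObjectStorage interface, the ObjectsConfig fields, and the newS3/newAzure constructors are assumptions for illustration, not the actual OpenReplay API.

package store

import (
    "fmt"
    "io"
    "time"
)

// ObjectStorage is a hypothetical interface abstracting S3 and Azure Blob
// Storage; the real project may expose different method names.
type ObjectStorage interface {
    Upload(reader io.Reader, key string, contentType string) error
    Get(key string) (io.ReadCloser, error)
    Exists(key string) bool
}

// ObjectsConfig is an assumed config shape: one backend is selected by name.
type ObjectsConfig struct {
    Provider              string // "s3" or "azure"
    AzureConnectionString string
    AzureContainer        string
    S3Bucket              string
    PresignExpiry         time.Duration
}

// NewStore picks the concrete implementation from the config, keeping
// backend selection in a single place.
func NewStore(cfg *ObjectsConfig) (ObjectStorage, error) {
    switch cfg.Provider {
    case "azure":
        return newAzure(cfg)
    case "s3", "":
        return newS3(cfg)
    default:
        return nil, fmt.Errorf("unknown object storage provider: %q", cfg.Provider)
    }
}

// Stub constructors so the sketch compiles; the real backends are elided.
func newS3(cfg *ObjectsConfig) (ObjectStorage, error) {
    return nil, fmt.Errorf("s3 backend elided in this sketch")
}

func newAzure(cfg *ObjectsConfig) (ObjectStorage, error) {
    return nil, fmt.Errorf("azure backend elided in this sketch")
}

With selection hidden behind one factory, services depend only on the interface, which is how the storage service entrypoint below consumes it via store.NewStore.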
package main

import (
    "log"
    "os"
    "os/signal"
    "syscall"
    "time"

    config "openreplay/backend/internal/config/storage"
    "openreplay/backend/internal/storage"
    "openreplay/backend/pkg/failover"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/metrics"
    storageMetrics "openreplay/backend/pkg/metrics/storage"
    "openreplay/backend/pkg/objectstorage/store"
    "openreplay/backend/pkg/queue"
)

func main() {
    m := metrics.New()
    m.Register(storageMetrics.List())

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := config.New()

    // The store factory hides the concrete object-storage backend
    // (e.g. S3 in OSS, Azure Blob Storage in the ee build).
    objStore, err := store.NewStore(&cfg.ObjectsConfig)
    if err != nil {
        log.Fatalf("can't init object storage: %s", err)
    }

    srv, err := storage.New(cfg, objStore)
    if err != nil {
        log.Fatalf("can't init storage service: %s", err)
    }

    // counter logs the timestamp of the most recently processed session.
    counter := storage.NewLogCounter()
    sessionFinder, err := failover.NewSessionFinder(cfg, srv)
    if err != nil {
        log.Fatalf("can't init sessionFinder module: %s", err)
    }

    // The consumer reads SessionEnd messages from the trigger topic; each one
    // marks a finished session whose data should be uploaded to object storage.
    consumer := queue.NewConsumer(
        cfg.GroupStorage,
        []string{
            cfg.TopicTrigger,
        },
        messages.NewMessageIterator(
            func(msg messages.Message) {
                sesEnd := msg.(*messages.SessionEnd)
                if err := srv.Process(sesEnd); err != nil {
                    log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID())
                    // Hand the session to the failover finder so it can be retried.
                    sessionFinder.Find(msg.SessionID(), sesEnd.Timestamp)
                }
                // Log timestamp of last processed session
                counter.Update(msg.SessionID(), time.UnixMilli(msg.Meta().Batch().Timestamp()))
            },
            []int{messages.MsgSessionEnd},
            true,
        ),
        false,
        cfg.MessageSizeLimit,
    )

    log.Println("Storage service started")

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

    counterTick := time.Tick(time.Second * 30)
    for {
        select {
        case sig := <-sigchan:
            // Graceful shutdown: stop the failover finder, wait for
            // in-flight uploads, then close the consumer.
            log.Printf("Caught signal %v: terminating", sig)
            sessionFinder.Stop()
            srv.Wait()
            consumer.Close()
            os.Exit(0)
        case <-counterTick:
            // Every 30 seconds: report progress and commit consumed offsets
            // once pending uploads have finished.
            go counter.Print()
            srv.Wait()
            if err := consumer.Commit(); err != nil {
                log.Printf("can't commit messages: %s", err)
            }
        case msg := <-consumer.Rebalanced():
            log.Println(msg)
        default:
            if err := consumer.ConsumeNext(); err != nil {
                log.Fatalf("Error on consumption: %v", err)
            }
        }
    }
}
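For completeness, here is a hedged sketch of what the ee Azure upload path might look like, written against the github.com/Azure/azure-sdk-for-go/sdk/storage/azblob client. The Storage type, its constructor, and the config wiring are illustrative assumptions (the actual ee implementation is not shown in this file); the debug log echoes the "added debug logs to Azure upload method" commit above.

package azure

import (
    "context"
    "io"
    "log"

    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob"
    "github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/blob"
)

// Storage is a hypothetical Azure-backed implementation of the
// object-storage interface returned by a factory like store.NewStore.
type Storage struct {
    client    *azblob.Client
    container string
}

// New connects using a connection string; how that string reaches the
// service (env var, config struct) is an assumption here.
func New(connectionString, container string) (*Storage, error) {
    client, err := azblob.NewClientFromConnectionString(connectionString, nil)
    if err != nil {
        return nil, err
    }
    return &Storage{client: client, container: container}, nil
}

// Upload streams a blob into the container, setting its content type.
func (s *Storage) Upload(reader io.Reader, key string, contentType string) error {
    // Debug log on the upload path, as added in the commits above.
    log.Printf("azure: uploading blob %q to container %q", key, s.container)
    _, err := s.client.UploadStream(context.Background(), s.container, key, reader,
        &azblob.UploadStreamOptions{
            HTTPHeaders: &blob.HTTPHeaders{BlobContentType: &contentType},
        })
    return err
}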