Backend for mobile tracker support (#1456)
* feat(backend): handlers for mobile messages
* feat(backend): new service template
* feat(backend): save mobile session start and send batches to Kafka
* feat(backend): added mobile support to sink, ender and storage
* helm(videostorage): added helm chart for a new service videostorage
* fix(backend): added pointer to string for userBrowser (because it's null for mobile sessions)
* feat(backend): added MsgIOSBatchMeta handler to message iterator's logic
* feat(backend): added iOS ts parser to ender
* feat(backend): enabled producing batch of messages to queue
* feat(backend): removed iosstart from mob files
* feat(backend): added new iOS message types
* feat(backend): added iosStart and iosEnd
* fix(backend): fixed log issue
* feat(backend): send tar.gz archives to special queue topic
* feat(backend): read raw archives from Kafka
* fix(backend): added missing file
* fix(backend): removed the second file reading
* fix(backend): fixed wrong queue topic name
* fix(backend): fixed mobile trigger topic name
* feat(backend): added tar.gz extractor and iOSSessionEnd handler
* feat(backend): debug logs on message uploading
* fix(backend): added raw-images topic consumption
* feat(backend): sink now sends iosSessionEnd to video-storage
* feat(backend): added dir creator for new sessions
* feat(backend): added correct command to execute
* feat(backend): added overwrite option
* feat(backend): added s3 uploader for video session replays
* feat(backend): new consumer group for mobile sessions
* feat(backend): debug logs for uploader
* feat(backend): removed unused log
* feat(backend): fixed s3 key for video replays
* feat(backend): removed debug code
* feat(backend): fixed video-storage message filter
* fix(backend): added mobileSessionEnd to SessionEnd converter
* feat(backend): added first version of db support for mobile events
* fix(backend): added swipe events to mob file
* feat(backend): added swipe event to pg
* feat(backend): split logic into 2 services: image-storage and video-storage
* feat(backend): added helm chart for image-storage service
* fix(backend): fixed table name for mobile taps
* feat(backend): added metadata handler for mobile message parser + fixed message filters
* feat(backend): added iosRawTopic to DB message consumer
* fix(backend): removed value from mobile inputs
* feat(backend): removed debug log from iterator
* feat(backend): added new Apple devices to iOS device parser
* fix(backend): added real projectID instead of 0
* feat(backend): extended the list of simulators for device detector
* feat(backend): updated networkCall mobile message
* fix(backend): added new way to define whether a network call succeeded or not
* feat(backend): added timezone support for mobile start request
* feat(backend): added 2 mobile events, Input and Click, to mob file
* feat(backend): refactored image storage
* feat(backend): video storage with 2 workers
* feat(backend): added project's platform support
* feat(backend): added memory size field for mobile start request
* feat(backend): changed video preset to ultrafast
* feat(backend): added debug log to http /late handler
* feat(backend): added debug log to db service for iosCrash messages
* feat(backend): added tapRage event handler to heuristics
* feat(backend): changed table and field names for iOS crashes
* feat(backend): added payload for tapRage events
* feat(backend): added TapRage events insert to DB
* feat(backend): added fps value to /mobile/start response
* feat(backend): added image quality parameter to /mobile/start response
* feat(backend): added ScreenLeave handler
* feat(backend): removed screenEnter and screenLeave events, added new viewComponent event

---------

Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
Parent: bb7c086c5b
Commit: d22e4e138e
76 changed files with 4958 additions and 2334 deletions
|
|
@ -46,15 +46,19 @@ ENV TZ=UTC \
|
|||
REDIS_STREAMS_MAX_LEN=10000 \
|
||||
TOPIC_RAW_WEB=raw \
|
||||
TOPIC_RAW_IOS=raw-ios \
|
||||
TOPIC_RAW_IMAGES=raw-images \
|
||||
TOPIC_CACHE=cache \
|
||||
TOPIC_ANALYTICS=analytics \
|
||||
TOPIC_TRIGGER=trigger \
|
||||
TOPIC_MOBILE_TRIGGER=mobile-trigger \
|
||||
GROUP_SINK=sink \
|
||||
GROUP_STORAGE=storage \
|
||||
GROUP_DB=db \
|
||||
GROUP_ENDER=ender \
|
||||
GROUP_CACHE=cache \
|
||||
GROUP_HEURISTICS=heuristics \
|
||||
GROUP_IMAGE_STORAGE=image-storage \
|
||||
GROUP_VIDEO_STORAGE=video-storage \
|
||||
AWS_REGION_WEB=eu-central-1 \
|
||||
AWS_REGION_IOS=eu-west-1 \
|
||||
AWS_REGION_ASSETS=eu-central-1 \
|
||||
|
|
@ -92,7 +96,10 @@ ENV TZ=UTC \
|
|||
|
||||
RUN if [ "$SERVICE_NAME" = "http" ]; then \
|
||||
wget https://raw.githubusercontent.com/ua-parser/uap-core/master/regexes.yaml -O "$UAPARSER_FILE" &&\
|
||||
wget https://static.openreplay.com/geoip/GeoLite2-City.mmdb -O "$MAXMINDDB_FILE"; fi
|
||||
wget https://static.openreplay.com/geoip/GeoLite2-City.mmdb -O "$MAXMINDDB_FILE"; \
|
||||
elif [ "$SERVICE_NAME" = "videostorage" ]; then \
|
||||
apk add --no-cache ffmpeg; \
|
||||
fi
|
||||
|
||||
|
||||
COPY --from=build /root/service /home/openreplay/service
|
||||
|
|
|
|||
|
|
@ -52,20 +52,29 @@ func main() {
|
|||
saver := datasaver.New(cfg, pg, sessManager)
|
||||
|
||||
// Message filter
|
||||
msgFilter := []int{messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
|
||||
msgFilter := []int{
|
||||
// Web messages
|
||||
messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
|
||||
messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
|
||||
messages.MsgJSException, messages.MsgResourceTiming, messages.MsgCustomEvent, messages.MsgCustomIssue,
|
||||
messages.MsgFetch, messages.MsgNetworkRequest, messages.MsgGraphQL, messages.MsgStateAction,
|
||||
messages.MsgCreateDocument, messages.MsgMouseClick,
|
||||
messages.MsgSetPageLocation, messages.MsgPageLoadTiming, messages.MsgPageRenderTiming,
|
||||
messages.MsgInputEvent, messages.MsgPageEvent, messages.MsgMouseThrashing, messages.MsgInputChange,
|
||||
messages.MsgUnbindNodes}
|
||||
messages.MsgUnbindNodes,
|
||||
// Mobile messages
|
||||
messages.MsgIOSSessionStart, messages.MsgIOSSessionEnd, messages.MsgIOSUserID, messages.MsgIOSUserAnonymousID,
|
||||
messages.MsgIOSMetadata, messages.MsgIOSCustomEvent, messages.MsgIOSNetworkCall, messages.MsgIOSScreenEnter,
|
||||
messages.MsgIOSScreenLeave, messages.MsgIOSClickEvent, messages.MsgIOSSwipeEvent, messages.MsgIOSInputEvent,
|
||||
messages.MsgIOSCrash, messages.MsgIOSIssueEvent,
|
||||
}
|
||||
|
||||
// Init consumer
|
||||
consumer := queue.NewConsumer(
|
||||
cfg.GroupDB,
|
||||
[]string{
|
||||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawIOS,
|
||||
cfg.TopicAnalytics,
|
||||
},
|
||||
messages.NewMessageIterator(saver.Handle, msgFilter, true),
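Note for reviewers: the msgFilter above is simply an allow-list of numeric message type IDs that the iterator checks before invoking the handler. A minimal, self-contained sketch of that idea follows; the Message interface and the ID used here are illustrative stand-ins, not the real openreplay types.

package main

import "fmt"

// Message is a stand-in for the tracker message interface; the real
// openreplay type also carries session metadata and an Encode method.
type Message interface {
	TypeID() int
}

type sessionEnd struct{}

func (sessionEnd) TypeID() int { return 126 } // illustrative ID only

// newTypeFilter turns a slice of allowed type IDs into an O(1) predicate,
// which is what an iterator built with a msgFilter effectively applies.
func newTypeFilter(allowed []int) func(Message) bool {
	set := make(map[int]struct{}, len(allowed))
	for _, id := range allowed {
		set[id] = struct{}{}
	}
	return func(m Message) bool {
		_, ok := set[m.TypeID()]
		return ok
	}
}

func main() {
	keep := newTypeFilter([]int{126}) // pass the combined web+mobile list here
	fmt.Println(keep(sessionEnd{}))   // true
}

Passing nil instead of a filter, as the heuristics and sink consumers do elsewhere in this PR, disables the check entirely.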
|
||||
|
|
|
|||
|
|
@ -58,13 +58,18 @@ func main() {
|
|||
return
|
||||
}
|
||||
|
||||
mobileMessages := []int{90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 107, 110, 111}
|
||||
|
||||
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
|
||||
consumer := queue.NewConsumer(
|
||||
cfg.GroupEnder,
|
||||
[]string{cfg.TopicRawWeb},
|
||||
[]string{
|
||||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawIOS,
|
||||
},
|
||||
messages.NewEnderMessageIterator(
|
||||
func(msg messages.Message) { sessionEndGenerator.UpdateSession(msg) },
|
||||
[]int{messages.MsgTimestamp},
|
||||
append([]int{messages.MsgTimestamp}, mobileMessages...),
|
||||
false),
|
||||
false,
|
||||
cfg.MessageSizeLimit,
|
||||
|
|
@ -168,10 +173,19 @@ func main() {
|
|||
}
|
||||
}
|
||||
}
|
||||
if sess.Platform == "ios" {
|
||||
msg := &messages.IOSSessionEnd{Timestamp: timestamp}
|
||||
if err := producer.Produce(cfg.TopicRawIOS, sessionID, msg.Encode()); err != nil {
|
||||
log.Printf("can't send iOSSessionEnd to topic: %s; sessID: %d", err, sessionID)
|
||||
return false, 0
|
||||
}
|
||||
} else {
|
||||
if err := producer.Produce(cfg.TopicRawWeb, sessionID, msg.Encode()); err != nil {
|
||||
log.Printf("can't send sessionEnd to topic: %s; sessID: %d", err, sessionID)
|
||||
return false, 0
|
||||
}
|
||||
}
|
||||
|
||||
if currDuration != 0 {
|
||||
diffDuration[sessionID] = int64(newDuration) - int64(currDuration)
|
||||
updatedDurations++
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import (
|
|||
"openreplay/backend/pkg/builders"
|
||||
"openreplay/backend/pkg/handlers"
|
||||
"openreplay/backend/pkg/handlers/custom"
|
||||
"openreplay/backend/pkg/handlers/ios"
|
||||
"openreplay/backend/pkg/handlers/web"
|
||||
"openreplay/backend/pkg/memory"
|
||||
"openreplay/backend/pkg/messages"
|
||||
|
|
@ -34,6 +35,8 @@ func main() {
|
|||
&web.NetworkIssueDetector{},
|
||||
&web.PerformanceAggregator{},
|
||||
web.NewAppCrashDetector(),
|
||||
&ios.TapRageDetector{},
|
||||
ios.NewViewComponentDurations(),
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -43,6 +46,7 @@ func main() {
|
|||
cfg.GroupHeuristics,
|
||||
[]string{
|
||||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawIOS,
|
||||
},
|
||||
messages.NewMessageIterator(eventBuilder.HandleMessage, nil, true),
|
||||
false,
|
||||
|
|
|
|||
backend/cmd/imagestorage/main.go (new file, 73 lines)
|
|
@ -0,0 +1,73 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
config "openreplay/backend/internal/config/imagestorage"
|
||||
"openreplay/backend/internal/imagestorage"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
|
||||
"openreplay/backend/pkg/queue"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m := metrics.New()
|
||||
m.Register(storageMetrics.List())
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := config.New()
|
||||
|
||||
srv, err := imagestorage.New(cfg)
|
||||
if err != nil {
|
||||
log.Printf("can't init storage service: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
consumer := queue.NewConsumer(
|
||||
cfg.GroupImageStorage,
|
||||
[]string{
|
||||
cfg.TopicRawImages,
|
||||
},
|
||||
messages.NewImagesMessageIterator(func(data []byte, sessID uint64) {
|
||||
if err := srv.Process(sessID, data); err != nil {
|
||||
log.Printf("can't process image: %s", err)
|
||||
}
|
||||
}, nil, true),
|
||||
false,
|
||||
cfg.MessageSizeLimit,
|
||||
)
|
||||
|
||||
log.Printf("Image storage service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
counterTick := time.Tick(time.Second * 30)
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
srv.Wait()
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-counterTick:
|
||||
srv.Wait()
|
||||
if err := consumer.Commit(); err != nil {
|
||||
log.Printf("can't commit messages: %s", err)
|
||||
}
|
||||
case msg := <-consumer.Rebalanced():
|
||||
log.Println(msg)
|
||||
default:
|
||||
err := consumer.ConsumeNext()
|
||||
if err != nil {
|
||||
log.Fatalf("Error on images consumption: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -75,10 +75,16 @@ func main() {
|
|||
sinkMetrics.IncreaseTotalMessages()
|
||||
|
||||
// Send SessionEnd trigger to storage service
|
||||
if msg.TypeID() == messages.MsgSessionEnd {
|
||||
if msg.TypeID() == messages.MsgSessionEnd || msg.TypeID() == messages.MsgIOSSessionEnd {
|
||||
if err := producer.Produce(cfg.TopicTrigger, msg.SessionID(), msg.Encode()); err != nil {
|
||||
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, msg.SessionID())
|
||||
}
|
||||
// duplicate session end message to mobile trigger topic to build video replay for mobile sessions
|
||||
if msg.TypeID() == messages.MsgIOSSessionEnd {
|
||||
if err := producer.Produce(cfg.TopicMobileTrigger, msg.SessionID(), msg.Encode()); err != nil {
|
||||
log.Printf("can't send iOSSessionEnd to mobile trigger topic: %s; sessID: %d", err, msg.SessionID())
|
||||
}
|
||||
}
|
||||
writer.Close(msg.SessionID())
|
||||
return
|
||||
}
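In short, the sink now fans an end-of-session message out to one or two trigger topics: every session end still goes to the regular trigger topic consumed by the storage service, and iOS ends are additionally copied to the mobile trigger topic so video-storage can render a replay. A condensed sketch of that routing decision (the type IDs below are placeholders; the topic names are the Dockerfile defaults):

package main

import "fmt"

// Illustrative stand-ins; the real values come from the service config
// (TOPIC_TRIGGER, TOPIC_MOBILE_TRIGGER) and the messages package.
const (
	topicTrigger       = "trigger"
	topicMobileTrigger = "mobile-trigger"
	msgSessionEnd      = 126 // placeholder IDs
	msgIOSSessionEnd   = 110
)

// topicsForSessionEnd mirrors the branching added to the sink: web ends go
// to one topic, mobile ends are duplicated to the mobile trigger as well.
func topicsForSessionEnd(typeID int) []string {
	switch typeID {
	case msgSessionEnd:
		return []string{topicTrigger}
	case msgIOSSessionEnd:
		return []string{topicTrigger, topicMobileTrigger}
	default:
		return nil
	}
}

func main() {
	fmt.Println(topicsForSessionEnd(msgIOSSessionEnd)) // [trigger mobile-trigger]
}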
|
||||
|
|
@ -178,6 +184,7 @@ func main() {
|
|||
cfg.GroupSink,
|
||||
[]string{
|
||||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawIOS,
|
||||
},
|
||||
messages.NewSinkMessageIterator(msgHandler, nil, false),
|
||||
false,
|
||||
|
|
|
|||
|
|
@ -48,6 +48,15 @@ func main() {
|
|||
},
|
||||
messages.NewMessageIterator(
|
||||
func(msg messages.Message) {
|
||||
// Convert IOSSessionEnd to SessionEnd
|
||||
if msg.TypeID() == messages.MsgIOSSessionEnd {
|
||||
mobileEnd, oldMeta := msg.(*messages.IOSSessionEnd), msg.Meta()
|
||||
msg = &messages.SessionEnd{
|
||||
Timestamp: mobileEnd.Timestamp,
|
||||
}
|
||||
msg.Meta().SetMeta(oldMeta)
|
||||
}
|
||||
// Process session to save mob files to s3
|
||||
sesEnd := msg.(*messages.SessionEnd)
|
||||
if err := srv.Process(sesEnd); err != nil {
|
||||
log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID())
|
||||
|
|
@ -56,7 +65,7 @@ func main() {
|
|||
// Log timestamp of last processed session
|
||||
counter.Update(msg.SessionID(), time.UnixMilli(msg.Meta().Batch().Timestamp()))
|
||||
},
|
||||
[]int{messages.MsgSessionEnd},
|
||||
[]int{messages.MsgSessionEnd, messages.MsgIOSSessionEnd},
|
||||
true,
|
||||
),
|
||||
false,
|
||||
|
|
|
|||
backend/cmd/videostorage/main.go (new file, 87 lines)
|
|
@ -0,0 +1,87 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
config "openreplay/backend/internal/config/videostorage"
|
||||
"openreplay/backend/internal/videostorage"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
storageMetrics "openreplay/backend/pkg/metrics/videostorage"
|
||||
"openreplay/backend/pkg/objectstorage/store"
|
||||
"openreplay/backend/pkg/queue"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m := metrics.New()
|
||||
m.Register(storageMetrics.List())
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := config.New()
|
||||
|
||||
objStore, err := store.NewStore(&cfg.ObjectsConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("can't init object storage: %s", err)
|
||||
}
|
||||
srv, err := videostorage.New(cfg, objStore)
|
||||
if err != nil {
|
||||
log.Printf("can't init storage service: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
workDir := cfg.FSDir
|
||||
|
||||
consumer := queue.NewConsumer(
|
||||
cfg.GroupVideoStorage,
|
||||
[]string{
|
||||
cfg.TopicMobileTrigger,
|
||||
},
|
||||
messages.NewMessageIterator(
|
||||
func(msg messages.Message) {
|
||||
sesEnd := msg.(*messages.IOSSessionEnd)
|
||||
log.Printf("recieved mobile session end: %d", sesEnd.SessionID())
|
||||
if err := srv.Process(sesEnd.SessionID(), workDir+"/screenshots/"+strconv.FormatUint(sesEnd.SessionID(), 10)+"/"); err != nil {
|
||||
log.Printf("upload session err: %s, sessID: %d", err, msg.SessionID())
|
||||
}
|
||||
},
|
||||
[]int{messages.MsgIOSSessionEnd},
|
||||
true,
|
||||
),
|
||||
false,
|
||||
cfg.MessageSizeLimit,
|
||||
)
|
||||
|
||||
log.Printf("Video storage service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
counterTick := time.Tick(time.Second * 30)
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
srv.Wait()
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-counterTick:
|
||||
srv.Wait()
|
||||
if err := consumer.Commit(); err != nil {
|
||||
log.Printf("can't commit messages: %s", err)
|
||||
}
|
||||
case msg := <-consumer.Rebalanced():
|
||||
log.Println(msg)
|
||||
default:
|
||||
err = consumer.ConsumeNext()
|
||||
if err != nil {
|
||||
log.Fatalf("Error on end event consumption: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
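The directory handed to srv.Process has to line up with where image-storage writes the extracted frames (FS_DIR plus SCREENSHOTS_DIR plus the session ID); note that this consumer hardcodes "screenshots" while image-storage reads it from config, so the coupling is purely a string convention. A small sketch of the path builder both sides effectively assume (the example values mirror the defaults in this PR):

package main

import (
	"fmt"
	"path"
	"strconv"
)

// screenshotsDir builds the per-session directory both services agree on:
// image-storage writes <fsDir>/<screenshotsDir>/<sessionID>/NNNNNN.jpeg,
// and video-storage reads the same directory to stitch replay.mp4.
func screenshotsDir(fsDir, screenshots string, sessionID uint64) string {
	return path.Join(fsDir, screenshots, strconv.FormatUint(sessionID, 10)) + "/"
}

func main() {
	// Example values matching the defaults used in this PR.
	fmt.Println(screenshotsDir("/mnt/efs", "screenshots", 123))
	// /mnt/efs/screenshots/123/
}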
|
||||
|
|
@ -16,6 +16,7 @@ type Config struct {
|
|||
GroupDB string `env:"GROUP_DB,required"`
|
||||
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
|
||||
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
|
||||
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
|
||||
CommitBatchTimeout time.Duration `env:"COMMIT_BATCH_TIMEOUT,default=15s"`
|
||||
BatchQueueLimit int `env:"DB_BATCH_QUEUE_LIMIT,required"`
|
||||
BatchSizeLimit int `env:"DB_BATCH_SIZE_LIMIT,required"`
|
||||
|
|
|
|||
|
|
@ -15,6 +15,7 @@ type Config struct {
|
|||
GroupEnder string `env:"GROUP_ENDER,required"`
|
||||
LoggerTimeout int `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
|
||||
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
|
||||
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
|
||||
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
|
||||
PartitionsNumber int `env:"PARTITIONS_NUMBER,required"`
|
||||
UseEncryption bool `env:"USE_ENCRYPTION,default=false"`
|
||||
|
|
|
|||
|
|
@ -17,6 +17,7 @@ type Config struct {
|
|||
HTTPTimeout time.Duration `env:"HTTP_TIMEOUT,default=60s"`
|
||||
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
|
||||
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
|
||||
TopicRawImages string `env:"TOPIC_RAW_IMAGES,required"`
|
||||
BeaconSizeLimit int64 `env:"BEACON_SIZE_LIMIT,required"`
|
||||
CompressionThreshold int64 `env:"COMPRESSION_THRESHOLD,default=20000"`
|
||||
JsonSizeLimit int64 `env:"JSON_SIZE_LIMIT,default=1000"`
|
||||
|
|
|
|||
backend/internal/config/imagestorage/config.go (new file, 21 lines)
|
|
@ -0,0 +1,21 @@
|
|||
package imagestorage
|
||||
|
||||
import (
|
||||
"openreplay/backend/internal/config/common"
|
||||
"openreplay/backend/internal/config/configurator"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.Config
|
||||
FSDir string `env:"FS_DIR,required"`
|
||||
ScreenshotsDir string `env:"SCREENSHOTS_DIR,default=screenshots"`
|
||||
TopicRawImages string `env:"TOPIC_RAW_IMAGES,required"`
|
||||
GroupImageStorage string `env:"GROUP_IMAGE_STORAGE,required"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
cfg := &Config{}
|
||||
configurator.Process(cfg)
|
||||
return cfg
|
||||
}
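For a quick local run of the new service, the required variables correspond to the Dockerfile ENV defaults added earlier in this diff. A hedged example of wiring them up in a test harness; FS_DIR has no default and the value below is invented, and the embedded common.Config brings further required variables not shown in this hunk:

package main

import "os"

func main() {
	// Defaults taken from the Dockerfile ENV block in this PR;
	// FS_DIR is an arbitrary example value.
	_ = os.Setenv("TOPIC_RAW_IMAGES", "raw-images")
	_ = os.Setenv("GROUP_IMAGE_STORAGE", "image-storage")
	_ = os.Setenv("FS_DIR", "/tmp/openreplay")
	// SCREENSHOTS_DIR and PROFILER_ENABLED fall back to their
	// struct-tag defaults ("screenshots", false).
}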
|
||||
|
|
@ -16,6 +16,7 @@ type Config struct {
|
|||
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
|
||||
TopicCache string `env:"TOPIC_CACHE,required"`
|
||||
TopicTrigger string `env:"TOPIC_TRIGGER,required"`
|
||||
TopicMobileTrigger string `env:"TOPIC_MOBILE_TRIGGER,required"`
|
||||
CacheAssets bool `env:"CACHE_ASSETS,required"`
|
||||
AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
|
||||
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
|
||||
|
|
|
|||
backend/internal/config/videostorage/config.go (new file, 23 lines)
|
|
@ -0,0 +1,23 @@
|
|||
package videostorage
|
||||
|
||||
import (
|
||||
"openreplay/backend/internal/config/common"
|
||||
"openreplay/backend/internal/config/configurator"
|
||||
"openreplay/backend/internal/config/objectstorage"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
common.Config
|
||||
objectstorage.ObjectsConfig
|
||||
FSDir string `env:"FS_DIR,required"`
|
||||
GroupVideoStorage string `env:"GROUP_VIDEO_STORAGE,required"`
|
||||
TopicMobileTrigger string `env:"TOPIC_MOBILE_TRIGGER,required"`
|
||||
VideoReplayFPS int `env:"VIDEO_REPLAY_FPS,default=3"`
|
||||
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
cfg := &Config{}
|
||||
configurator.Process(cfg)
|
||||
return cfg
|
||||
}
|
||||
|
|
@ -40,6 +40,15 @@ func (s *saverImpl) Handle(msg Message) {
|
|||
if msg.TypeID() == MsgCustomEvent {
|
||||
defer s.Handle(types.WrapCustomEvent(msg.(*CustomEvent)))
|
||||
}
|
||||
if IsIOSType(msg.TypeID()) {
|
||||
// Handle iOS messages
|
||||
if err := s.handleMobileMessage(msg); err != nil {
|
||||
if !postgres.IsPkeyViolation(err) {
|
||||
log.Printf("iOS Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := s.handleMessage(msg); err != nil {
|
||||
if !postgres.IsPkeyViolation(err) {
|
||||
log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
|
||||
|
|
@ -52,6 +61,73 @@ func (s *saverImpl) Handle(msg Message) {
|
|||
return
|
||||
}
|
||||
|
||||
func (s *saverImpl) handleMobileMessage(msg Message) error {
|
||||
session, err := s.sessions.Get(msg.SessionID())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch m := msg.(type) {
|
||||
case *IOSSessionStart:
|
||||
return s.pg.InsertIOSSessionStart(m.SessionID(), m)
|
||||
case *IOSSessionEnd:
|
||||
return s.pg.InsertIOSSessionEnd(m.SessionID(), m)
|
||||
case *IOSUserID:
|
||||
if err = s.sessions.UpdateUserID(session.SessionID, m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
s.pg.InsertAutocompleteValue(session.SessionID, session.ProjectID, "USERID_IOS", m.Value)
|
||||
return nil
|
||||
case *IOSUserAnonymousID:
|
||||
if err = s.sessions.UpdateAnonymousID(session.SessionID, m.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
s.pg.InsertAutocompleteValue(session.SessionID, session.ProjectID, "USERANONYMOUSID_IOS", m.Value)
|
||||
return nil
|
||||
case *IOSMetadata:
|
||||
return s.sessions.UpdateMetadata(m.SessionID(), m.Key, m.Value)
|
||||
case *IOSCustomEvent:
|
||||
return s.pg.InsertIOSCustomEvent(session, m)
|
||||
case *IOSClickEvent:
|
||||
if err := s.pg.InsertIOSClickEvent(session, m); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateEventsStats(session.SessionID, 1, 0)
|
||||
case *IOSSwipeEvent:
|
||||
if err := s.pg.InsertIOSSwipeEvent(session, m); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateEventsStats(session.SessionID, 1, 0)
|
||||
case *IOSInputEvent:
|
||||
if err := s.pg.InsertIOSInputEvent(session, m); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateEventsStats(session.SessionID, 1, 0)
|
||||
case *IOSNetworkCall:
|
||||
return s.pg.InsertIOSNetworkCall(session, m)
|
||||
case *IOSScreenEnter:
|
||||
if err := s.pg.InsertIOSScreenEnter(session, m); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateEventsStats(session.SessionID, 1, 1)
|
||||
case *IOSScreenLeave:
|
||||
if err := s.pg.InsertIOSScreenLeave(session, m); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateEventsStats(session.SessionID, 1, 0)
|
||||
case *IOSCrash:
|
||||
if err := s.pg.InsertIOSCrash(session.SessionID, session.ProjectID, m); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateIssuesStats(session.SessionID, 1, 1000)
|
||||
case *IOSIssueEvent:
|
||||
if err = s.pg.InsertIOSIssueEvent(session, m); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateIssuesStats(session.SessionID, 0, postgres.GetIssueScore(m.Type))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *saverImpl) handleMessage(msg Message) error {
|
||||
session, err := s.sessions.Get(msg.SessionID())
|
||||
if err != nil {
|
||||
|
|
@ -69,7 +145,7 @@ func (s *saverImpl) handleMessage(msg Message) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateIssuesStats(session.SessionID, 0, postgres.GetIssueScore(m))
|
||||
return s.sessions.UpdateIssuesStats(session.SessionID, 0, postgres.GetIssueScore(m.Type))
|
||||
case *CustomIssue:
|
||||
ie := &IssueEvent{
|
||||
Type: "custom",
|
||||
|
|
@ -82,7 +158,7 @@ func (s *saverImpl) handleMessage(msg Message) error {
|
|||
if err = s.pg.InsertIssueEvent(session, ie); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.sessions.UpdateIssuesStats(session.SessionID, 0, postgres.GetIssueScore(ie))
|
||||
return s.sessions.UpdateIssuesStats(session.SessionID, 0, postgres.GetIssueScore(ie.Type))
|
||||
case *UserID:
|
||||
if err = s.sessions.UpdateUserID(session.SessionID, m.ID); err != nil {
|
||||
return err
|
||||
|
|
|
|||
|
|
@ -64,6 +64,24 @@ func MapIOSDevice(identifier string) string {
|
|||
return "iPhone 12 Pro"
|
||||
case "iPhone13,4":
|
||||
return "iPhone 12 Pro Max"
|
||||
case "iPhone14,4":
|
||||
return "iPhone 13 mini"
|
||||
case "iPhone14,5":
|
||||
return "iPhone 13"
|
||||
case "iPhone14,2":
|
||||
return "iPhone 13 Pro"
|
||||
case "iPhone14,3":
|
||||
return "iPhone 13 Pro Max"
|
||||
case "iPhone14,7":
|
||||
return "iPhone 14"
|
||||
case "iPhone14,8":
|
||||
return "iPhone 14 Plus"
|
||||
case "iPhone15,2":
|
||||
return "iPhone 14 Pro"
|
||||
case "iPhone15,3":
|
||||
return "iPhone 14 Pro Max"
|
||||
case "iPhone14,6":
|
||||
return "iPhone SE (3rd generation)"
|
||||
case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":
|
||||
return "iPad 2"
|
||||
case "iPad3,1", "iPad3,2", "iPad3,3":
|
||||
|
|
@ -78,6 +96,10 @@ func MapIOSDevice(identifier string) string {
|
|||
return "iPad (7th generation)"
|
||||
case "iPad11,6", "iPad11,7":
|
||||
return "iPad (8th generation)"
|
||||
case "iPad12,1", "iPad12,2":
|
||||
return "iPad (9th generation)"
|
||||
case "iPad13,18", "iPad13,19":
|
||||
return "iPad (10th generation)"
|
||||
case "iPad4,1", "iPad4,2", "iPad4,3":
|
||||
return "iPad Air"
|
||||
case "iPad5,3", "iPad5,4":
|
||||
|
|
@ -86,6 +108,8 @@ func MapIOSDevice(identifier string) string {
|
|||
return "iPad Air (3rd generation)"
|
||||
case "iPad13,1", "iPad13,2":
|
||||
return "iPad Air (4th generation)"
|
||||
case "iPad13,16", "iPad13,17":
|
||||
return "iPad Air (5th generation)"
|
||||
case "iPad2,5", "iPad2,6", "iPad2,7":
|
||||
return "iPad mini"
|
||||
case "iPad4,4", "iPad4,5", "iPad4,6":
|
||||
|
|
@ -96,6 +120,8 @@ func MapIOSDevice(identifier string) string {
|
|||
return "iPad mini 4"
|
||||
case "iPad11,1", "iPad11,2":
|
||||
return "iPad mini (5th generation)"
|
||||
case "iPad14,1", "iPad14,2":
|
||||
return "iPad mini (6th generation)"
|
||||
case "iPad6,3", "iPad6,4":
|
||||
return "iPad Pro (9.7-inch)"
|
||||
case "iPad7,3", "iPad7,4":
|
||||
|
|
@ -104,6 +130,10 @@ func MapIOSDevice(identifier string) string {
|
|||
return "iPad Pro (11-inch) (1st generation)"
|
||||
case "iPad8,9", "iPad8,10":
|
||||
return "iPad Pro (11-inch) (2nd generation)"
|
||||
case "iPad13,4", "iPad13,5", "iPad13,6", "iPad13,7":
|
||||
return "iPad Pro (11-inch) (3rd generation)"
|
||||
case "iPad14,3", "iPad14,4":
|
||||
return "iPad Pro (11-inch) (4th generation)"
|
||||
case "iPad6,7", "iPad6,8":
|
||||
return "iPad Pro (12.9-inch) (1st generation)"
|
||||
case "iPad7,1", "iPad7,2":
|
||||
|
|
@ -112,6 +142,10 @@ func MapIOSDevice(identifier string) string {
|
|||
return "iPad Pro (12.9-inch) (3rd generation)"
|
||||
case "iPad8,11", "iPad8,12":
|
||||
return "iPad Pro (12.9-inch) (4th generation)"
|
||||
case "iPad13,8", "iPad13,9", "iPad13,10", "iPad13,11":
|
||||
return "iPad Pro (12.9-inch) (5th generation)"
|
||||
case "iPad14,5", "iPad14,6":
|
||||
return "iPad Pro (12.9-inch) (6th generation)"
|
||||
case "AppleTV5,3":
|
||||
return "Apple TV"
|
||||
case "AppleTV6,2":
|
||||
|
|
@ -120,7 +154,7 @@ func MapIOSDevice(identifier string) string {
|
|||
return "HomePod"
|
||||
case "AudioAccessory5,1":
|
||||
return "HomePod mini"
|
||||
case "i386", "x86_64":
|
||||
case "i386", "x86_64", "arm64":
|
||||
return "Simulator"
|
||||
default:
|
||||
return identifier
|
||||
|
|
@ -129,7 +163,7 @@ func MapIOSDevice(identifier string) string {
|
|||
|
||||
func GetIOSDeviceType(identifier string) string {
|
||||
if strings.Contains(identifier, "iPhone") {
|
||||
return "mobile" //"phone"
|
||||
return "mobile"
|
||||
}
|
||||
if strings.Contains(identifier, "iPad") {
|
||||
return "tablet"
|
||||
|
|
|
|||
backend/internal/http/router/handlers-mobile.go (new file, 224 lines)
|
|
@ -0,0 +1,224 @@
|
|||
package router
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"openreplay/backend/internal/http/ios"
|
||||
"openreplay/backend/internal/http/util"
|
||||
"openreplay/backend/internal/http/uuid"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/sessions"
|
||||
"openreplay/backend/pkg/token"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
startTime := time.Now()
|
||||
req := &StartIOSSessionRequest{}
|
||||
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
|
||||
defer body.Close()
|
||||
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
if req.ProjectKey == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
p, err := e.services.Projects.GetProjectByKey(*req.ProjectKey)
|
||||
if err != nil {
|
||||
if postgres.IsNoRowsErr(err) {
|
||||
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"), startTime, r.URL.Path, 0)
|
||||
} else {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Check if the project supports mobile sessions
|
||||
if !p.IsMobile() {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("project doesn't support mobile sessions"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
userUUID := uuid.GetUUID(req.UserUUID)
|
||||
tokenData, err := e.services.Tokenizer.Parse(req.Token)
|
||||
|
||||
if err != nil { // Starting the new one
|
||||
dice := byte(rand.Intn(100)) // [0, 100)
|
||||
if dice >= p.SampleRate {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
ua := e.services.UaParser.ParseFromHTTPRequest(r)
|
||||
if ua == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{sessionID, 0, expTime.UnixMilli()}
|
||||
|
||||
geoInfo := e.ExtractGeoData(r)
|
||||
|
||||
if err := e.services.Sessions.Add(&sessions.Session{
|
||||
SessionID: sessionID,
|
||||
Platform: "ios",
|
||||
Timestamp: req.Timestamp,
|
||||
Timezone: req.Timezone,
|
||||
ProjectID: p.ProjectID,
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
RevID: req.RevID,
|
||||
UserUUID: userUUID,
|
||||
UserOS: "IOS",
|
||||
UserOSVersion: req.UserOSVersion,
|
||||
UserDevice: ios.MapIOSDevice(req.UserDevice),
|
||||
UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
|
||||
UserCountry: geoInfo.Country,
|
||||
UserState: geoInfo.State,
|
||||
UserCity: geoInfo.City,
|
||||
UserDeviceMemorySize: req.DeviceMemory,
|
||||
UserDeviceHeapSize: req.DeviceMemory,
|
||||
}); err != nil {
|
||||
log.Printf("failed to add mobile session to DB: %v", err)
|
||||
}
|
||||
|
||||
sessStart := &messages.IOSSessionStart{
|
||||
Timestamp: req.Timestamp,
|
||||
ProjectID: uint64(p.ProjectID),
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
RevID: req.RevID,
|
||||
UserUUID: userUUID,
|
||||
UserOS: "IOS",
|
||||
UserOSVersion: req.UserOSVersion,
|
||||
UserDevice: ios.MapIOSDevice(req.UserDevice),
|
||||
UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
|
||||
UserCountry: geoInfo.Pack(),
|
||||
}
|
||||
log.Printf("mobile session start: %+v", sessStart)
|
||||
|
||||
if err := e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, sessStart.Encode()); err != nil {
|
||||
log.Printf("failed to produce mobile session start message: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
ResponseWithJSON(w, &StartIOSSessionResponse{
|
||||
Token: e.services.Tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
|
||||
ImageQuality: "standard", // Pull from project settings (low, standard, high)
|
||||
FrameRate: 3, // Pull from project settings
|
||||
}, startTime, r.URL.Path, 0)
|
||||
}
|
||||
|
||||
func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
startTime := time.Now()
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
|
||||
}
|
||||
|
||||
func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
startTime := time.Now()
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil && err != token.EXPIRED {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
// Check timestamps here?
|
||||
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
|
||||
}
|
||||
|
||||
func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
startTime := time.Now()
|
||||
log.Printf("recieved imagerequest")
|
||||
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil { // Should accept expired token?
|
||||
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
|
||||
defer r.Body.Close()
|
||||
|
||||
err = r.ParseMultipartForm(5 * 1e6) // ~5Mb
|
||||
if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
|
||||
ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
|
||||
return
|
||||
// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
|
||||
} else if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
|
||||
return
|
||||
}
|
||||
|
||||
if r.MultipartForm == nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"), startTime, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
|
||||
if len(r.MultipartForm.Value["projectKey"]) == 0 {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter?
|
||||
return
|
||||
}
|
||||
|
||||
//prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"
|
||||
|
||||
for _, fileHeaderList := range r.MultipartForm.File {
|
||||
for _, fileHeader := range fileHeaderList {
|
||||
file, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
continue // TODO: send server error or accumulate successful files
|
||||
}
|
||||
//key := prefix + fileHeader.Filename
|
||||
|
||||
data, err := ioutil.ReadAll(file)
|
||||
if err != nil {
|
||||
log.Fatalf("failed reading data: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("Uploading image... %v, len: %d", util.SafeString(fileHeader.Filename), len(data))
|
||||
|
||||
if err := e.services.Producer.Produce(e.cfg.TopicRawImages, sessionData.ID, data); err != nil {
|
||||
log.Printf("failed to produce mobile session start message: %v", err)
|
||||
}
|
||||
log.Printf("Image uploaded")
|
||||
//go func() { //TODO: mime type from header
|
||||
// log.Printf("Uploading image... %v", file)
|
||||
// //if err := e.services.Storage.Upload(file, key, "image/jpeg", false); err != nil {
|
||||
// // log.Printf("Upload ios screen error. %v", err)
|
||||
// //}
|
||||
//}()
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
|
@ -115,6 +115,12 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
// Check if the project supports mobile sessions
|
||||
if !p.IsWeb() {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)
|
||||
return
|
||||
}
|
||||
|
||||
ua := e.services.UaParser.ParseFromHTTPRequest(r)
|
||||
if ua == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
|
||||
|
|
|
|||
|
|
@ -23,20 +23,24 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID
|
|||
|
||||
reader, err = gzip.NewReader(body)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: stage-dependent response
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
//log.Println("Gzip reader init", reader)
|
||||
log.Println("Gzip reader init", reader)
|
||||
defer reader.Close()
|
||||
default:
|
||||
reader = body
|
||||
}
|
||||
//log.Println("Reader after switch:", reader)
|
||||
log.Println("Reader after switch:", reader)
|
||||
buf, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: send error here only on staging
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
log.Println("Produce message: ", buf, string(buf))
|
||||
if err := e.services.Producer.Produce(topicName, sessionID, buf); err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
|
||||
return
|
||||
}
|
||||
e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send?
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -38,3 +38,26 @@ type NotStartedRequest struct {
|
|||
TrackerVersion string `json:"trackerVersion"`
|
||||
DoNotTrack bool `json:"DoNotTrack"`
|
||||
}
|
||||
|
||||
type StartIOSSessionRequest struct {
|
||||
Token string `json:"token"`
|
||||
ProjectKey *string `json:"projectKey"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
RevID string `json:"revID"`
|
||||
UserUUID *string `json:"userUUID"`
|
||||
UserOSVersion string `json:"userOSVersion"`
|
||||
UserDevice string `json:"userDevice"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
Timezone string `json:"timezone"`
|
||||
DeviceMemory uint64 `json:"deviceMemory"`
|
||||
}
|
||||
|
||||
type StartIOSSessionResponse struct {
|
||||
Token string `json:"token"`
|
||||
ImagesHashList []string `json:"imagesHashList"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
SessionID string `json:"sessionID"`
|
||||
ImageQuality string `json:"quality"`
|
||||
FrameRate int `json:"fps"`
|
||||
}
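Taken together with handlers-mobile.go, the start handshake is a small JSON exchange. A sketch of what a tracker request body looks like on the wire, derived from StartIOSSessionRequest (all field values are invented examples):

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors StartIOSSessionRequest from this file; values are examples only.
type startRequest struct {
	ProjectKey     string `json:"projectKey"`
	TrackerVersion string `json:"trackerVersion"`
	RevID          string `json:"revID"`
	UserUUID       string `json:"userUUID"`
	UserOSVersion  string `json:"userOSVersion"`
	UserDevice     string `json:"userDevice"`
	Timestamp      uint64 `json:"timestamp"`
	Timezone       string `json:"timezone"`
	DeviceMemory   uint64 `json:"deviceMemory"`
}

func main() {
	req := startRequest{
		ProjectKey:     "my-project-key",
		TrackerVersion: "1.0.0",
		UserOSVersion:  "16.4",
		UserDevice:     "iPhone14,5", // mapped to "iPhone 13" by MapIOSDevice
		Timestamp:      1700000000000,
		Timezone:       "UTC+02:00",
		DeviceMemory:   4096,
	}
	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out))
	// The response carries token, userUUID, sessionID, beaconSizeLimit,
	// plus the new "quality" and "fps" fields added in this PR.
}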
|
||||
|
|
|
|||
|
|
@ -105,6 +105,10 @@ func (e *Router) init() {
|
|||
"/v1/web/start": e.startSessionHandlerWeb,
|
||||
"/v1/web/i": e.pushMessagesHandlerWeb,
|
||||
"/v1/web/feature-flags": e.featureFlagsHandlerWeb,
|
||||
"/v1/mobile/start": e.startSessionHandlerIOS,
|
||||
"/v1/mobile/i": e.pushMessagesHandlerIOS,
|
||||
"/v1/mobile/late": e.pushLateMessagesHandlerIOS,
|
||||
"/v1/mobile/images": e.imagesUploadHandlerIOS,
|
||||
}
|
||||
prefix := "/ingest"
|
||||
|
||||
|
|
|
|||
backend/internal/imagestorage/service.go (new file, 133 lines)
|
|
@ -0,0 +1,133 @@
|
|||
package imagestorage
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
config "openreplay/backend/internal/config/imagestorage"
|
||||
)
|
||||
|
||||
type Task struct {
|
||||
sessionID uint64 // to generate path
|
||||
images map[string]*bytes.Buffer
|
||||
isBreakTask bool
|
||||
}
|
||||
|
||||
func NewBreakTask() *Task {
|
||||
return &Task{isBreakTask: true}
|
||||
}
|
||||
|
||||
type ImageStorage struct {
|
||||
cfg *config.Config
|
||||
writeToDiskTasks chan *Task
|
||||
workersStopped chan struct{}
|
||||
}
|
||||
|
||||
func New(cfg *config.Config) (*ImageStorage, error) {
|
||||
switch {
|
||||
case cfg == nil:
|
||||
return nil, fmt.Errorf("config is empty")
|
||||
}
|
||||
newStorage := &ImageStorage{
|
||||
cfg: cfg,
|
||||
writeToDiskTasks: make(chan *Task, 1),
|
||||
workersStopped: make(chan struct{}),
|
||||
}
|
||||
go newStorage.runWorker()
|
||||
return newStorage, nil
|
||||
}
|
||||
|
||||
func (v *ImageStorage) Wait() {
|
||||
// send stop signal
|
||||
v.writeToDiskTasks <- NewBreakTask()
|
||||
// wait for workers to stop
|
||||
<-v.workersStopped
|
||||
}
|
||||
|
||||
func (v *ImageStorage) Process(sessID uint64, data []byte) error {
|
||||
start := time.Now()
|
||||
if err := v.extract(sessID, data); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Printf("sessID: %d, arch size: %d, extracted archive in: %s", sessID, len(data), time.Since(start))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *ImageStorage) extract(sessID uint64, data []byte) error {
|
||||
images := make(map[string]*bytes.Buffer)
|
||||
uncompressedStream, err := gzip.NewReader(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create gzip reader: %s", err.Error())
|
||||
}
|
||||
tarReader := tar.NewReader(uncompressedStream)
|
||||
|
||||
for {
|
||||
header, err := tarReader.Next()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return fmt.Errorf("can't read tar header: %s", err.Error())
|
||||
}
|
||||
|
||||
if header.Typeflag == tar.TypeReg {
|
||||
var buf bytes.Buffer
|
||||
if _, err := buf.ReadFrom(tarReader); err != nil {
|
||||
return fmt.Errorf("can't copy file: %s", err.Error())
|
||||
}
|
||||
images[header.Name] = &buf
|
||||
} else {
|
||||
log.Printf("ExtractTarGz: uknown type: %d in %s", header.Typeflag, header.Name)
|
||||
}
|
||||
}
|
||||
|
||||
v.writeToDiskTasks <- &Task{sessionID: sessID, images: images}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *ImageStorage) writeToDisk(task *Task) {
|
||||
// Build the directory path
|
||||
path := v.cfg.FSDir + "/"
|
||||
if v.cfg.ScreenshotsDir != "" {
|
||||
path += v.cfg.ScreenshotsDir + "/"
|
||||
}
|
||||
path += strconv.FormatUint(task.sessionID, 10) + "/"
|
||||
|
||||
// Ensure the directory exists
|
||||
if err := os.MkdirAll(path, 0755); err != nil {
|
||||
log.Fatalf("Error creating directories: %v", err)
|
||||
}
|
||||
|
||||
// Write images to disk
|
||||
for name, img := range task.images {
|
||||
outFile, err := os.Create(path + name) // or open file in rewrite mode
|
||||
if err != nil {
|
||||
log.Printf("can't create file: %s", err.Error())
|
||||
}
|
||||
if _, err := io.Copy(outFile, img); err != nil {
|
||||
log.Printf("can't copy file: %s", err.Error())
|
||||
}
|
||||
outFile.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (v *ImageStorage) runWorker() {
|
||||
for {
|
||||
select {
|
||||
case task := <-v.writeToDiskTasks:
|
||||
if task.isBreakTask {
|
||||
v.workersStopped <- struct{}{}
|
||||
continue
|
||||
}
|
||||
v.writeToDisk(task)
|
||||
}
|
||||
}
|
||||
}
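The extractor above expects a gzip-compressed tar whose entries are the individual screenshot files. A minimal sketch of building such an archive on the sending side; it uses the standard-library gzip rather than the pgzip wrapper used above, and the %06d.jpeg frame naming is the convention the video service's ffmpeg call relies on:

package main

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"fmt"
)

// buildArchive packs raw JPEG frames into the tar.gz layout the image
// storage service unpacks: one regular file per frame, named 000000.jpeg,
// 000001.jpeg, ... so ffmpeg can later consume them as %06d.jpeg.
func buildArchive(frames [][]byte) ([]byte, error) {
	var buf bytes.Buffer
	gz := gzip.NewWriter(&buf)
	tw := tar.NewWriter(gz)
	for i, frame := range frames {
		hdr := &tar.Header{
			Name: fmt.Sprintf("%06d.jpeg", i),
			Mode: 0644,
			Size: int64(len(frame)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write(frame); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	if err := gz.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	archive, err := buildArchive([][]byte{[]byte("fake-jpeg-bytes")})
	if err != nil {
		panic(err)
	}
	fmt.Println("archive size:", len(archive))
}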
|
||||
|
|
@ -17,6 +17,7 @@ type session struct {
|
|||
lastUpdate int64 // local timestamp
|
||||
lastUserTime uint64
|
||||
isEnded bool
|
||||
isMobile bool
|
||||
}
|
||||
|
||||
// SessionEnder updates timestamp of last message for each session
|
||||
|
|
@ -73,6 +74,10 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
|
|||
msgTimestamp = msg.Meta().Timestamp
|
||||
localTimestamp = time.Now().UnixMilli()
|
||||
)
|
||||
if messages.IsIOSType(msg.TypeID()) {
|
||||
msgTimestamp = messages.GetTimestamp(msg)
|
||||
log.Printf("got timestamp from iOS message, session: %d, ts: %d", msg.SessionID(), msgTimestamp)
|
||||
}
|
||||
if batchTimestamp == 0 {
|
||||
log.Printf("got empty timestamp for sessionID: %d", sessionID)
|
||||
return
|
||||
|
|
@ -86,6 +91,7 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
|
|||
lastUpdate: localTimestamp,
|
||||
lastUserTime: msgTimestamp, // last timestamp from user's machine
|
||||
isEnded: false,
|
||||
isMobile: messages.IsIOSType(msg.TypeID()),
|
||||
}
|
||||
ender.IncreaseActiveSessions()
|
||||
ender.IncreaseTotalSessions()
|
||||
|
|
|
|||
backend/internal/videostorage/service.go (new file, 114 lines)
|
|
@ -0,0 +1,114 @@
|
|||
package videostorage
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
config "openreplay/backend/internal/config/videostorage"
|
||||
"openreplay/backend/pkg/objectstorage"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Task struct {
|
||||
sessionID string
|
||||
path string
|
||||
isBreakTask bool
|
||||
}
|
||||
|
||||
func NewBreakTask() *Task {
|
||||
return &Task{isBreakTask: true}
|
||||
}
|
||||
|
||||
type VideoStorage struct {
|
||||
cfg *config.Config
|
||||
framerate string
|
||||
objStorage objectstorage.ObjectStorage
|
||||
sendToS3Tasks chan *Task
|
||||
workersStopped chan struct{}
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, objStorage objectstorage.ObjectStorage) (*VideoStorage, error) {
|
||||
switch {
|
||||
case cfg == nil:
|
||||
return nil, fmt.Errorf("config is empty")
|
||||
case objStorage == nil:
|
||||
return nil, fmt.Errorf("object storage is empty")
|
||||
case cfg.VideoReplayFPS <= 0:
|
||||
return nil, fmt.Errorf("video replay fps is invalid: %d", cfg.VideoReplayFPS)
|
||||
}
|
||||
newStorage := &VideoStorage{
|
||||
cfg: cfg,
|
||||
framerate: strconv.Itoa(cfg.VideoReplayFPS),
|
||||
objStorage: objStorage,
|
||||
sendToS3Tasks: make(chan *Task, 1),
|
||||
workersStopped: make(chan struct{}),
|
||||
}
|
||||
go newStorage.runWorker()
|
||||
return newStorage, nil
|
||||
}
|
||||
|
||||
func (v *VideoStorage) makeVideo(sessID uint64, filesPath string) error {
|
||||
files, _ := ioutil.ReadDir(filesPath)
|
||||
log.Printf("There are %d screenshot of session %d\n", len(files), sessID)
|
||||
|
||||
// Try to call ffmpeg and print the result
|
||||
start := time.Now()
|
||||
sessionID := strconv.FormatUint(sessID, 10)
|
||||
imagesPath := "/mnt/efs/screenshots/" + sessionID + "/%06d.jpeg"
|
||||
videoPath := "/mnt/efs/screenshots/" + sessionID + "/replay.mp4"
|
||||
cmd := exec.Command("ffmpeg", "-y", "-f", "image2", "-framerate", v.framerate, "-start_number", "000000", "-i",
|
||||
imagesPath, "-vf", "scale=-2:1064", "-c:v", "libx264", "-preset", "ultrafast", "-crf", "23",
|
||||
videoPath)
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to execute command: %v, stderr: %v", err, stderr.String())
|
||||
}
|
||||
log.Printf("made video replay in %v", time.Since(start))
|
||||
v.sendToS3Tasks <- &Task{sessionID: sessionID, path: videoPath}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *VideoStorage) sendToS3(task *Task) {
|
||||
start := time.Now()
|
||||
// Read video file from disk
|
||||
video, err := ioutil.ReadFile(task.path)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to read video file: %v", err)
|
||||
}
|
||||
// Upload video file to S3
|
||||
if err := v.objStorage.Upload(bytes.NewReader(video), task.sessionID+"/replay.mp4", "video/mp4", objectstorage.NoCompression); err != nil {
|
||||
log.Fatalf("Storage: start upload video replay failed. %s", err)
|
||||
}
|
||||
log.Printf("Video file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
|
||||
return
|
||||
}
|
||||
|
||||
func (v *VideoStorage) Process(sessID uint64, filesPath string) error {
|
||||
return v.makeVideo(sessID, filesPath)
|
||||
}
|
||||
|
||||
func (v *VideoStorage) Wait() {
|
||||
v.sendToS3Tasks <- NewBreakTask()
|
||||
<-v.workersStopped
|
||||
}
|
||||
|
||||
func (v *VideoStorage) runWorker() {
|
||||
for {
|
||||
select {
|
||||
case task := <-v.sendToS3Tasks:
|
||||
if task.isBreakTask {
|
||||
v.workersStopped <- struct{}{}
|
||||
continue
|
||||
}
|
||||
v.sendToS3(task)
|
||||
}
|
||||
}
|
||||
}
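Since the ffmpeg invocation is the heart of this service, it is worth spelling out what each flag does. A sketch that rebuilds the same argument list with one option per comment (the framerate and paths are examples matching the code above):

package main

import "fmt"

// ffmpegArgs reproduces the command built in makeVideo, one option per line.
func ffmpegArgs(framerate, imagesPattern, videoPath string) []string {
	return []string{
		"-y",                      // overwrite replay.mp4 if it already exists
		"-f", "image2",            // treat the input as an image sequence
		"-framerate", framerate,   // VIDEO_REPLAY_FPS, 3 by default
		"-start_number", "000000", // first frame is 000000.jpeg
		"-i", imagesPattern,       // e.g. .../<sessionID>/%06d.jpeg
		"-vf", "scale=-2:1064",    // fix height at 1064px, keep aspect ratio with an even width
		"-c:v", "libx264",         // H.264 output
		"-preset", "ultrafast",    // favour encode speed over file size
		"-crf", "23",              // libx264's default quality/size trade-off
		videoPath,                 // e.g. .../<sessionID>/replay.mp4
	}
}

func main() {
	fmt.Println(ffmpegArgs("3", "/tmp/123/%06d.jpeg", "/tmp/123/replay.mp4"))
}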
|
||||
|
|
@ -6,11 +6,11 @@ import (
|
|||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func GetIssueScore(issueEvent *messages.IssueEvent) int {
|
||||
switch issueEvent.Type {
|
||||
func GetIssueScore(issueType string) int {
|
||||
switch issueType {
|
||||
case "crash", "dead_click", "memory", "cpu":
|
||||
return 1000
|
||||
case "bad_request", "excessive_scrolling", "click_rage", "missing_resource":
|
||||
case "bad_request", "excessive_scrolling", "click_rage", "missing_resource", "tap_rage":
|
||||
return 500
|
||||
case "slow_resource", "slow_page_load":
|
||||
return 100
|
||||
|
|
|
|||
backend/pkg/db/postgres/mobile.go (new file, 163 lines)
|
|
@ -0,0 +1,163 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"log"
|
||||
"openreplay/backend/pkg/hashid"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/sessions"
|
||||
"openreplay/backend/pkg/url"
|
||||
)
|
||||
|
||||
func (conn *Conn) InsertIOSSessionStart(sessionID uint64, e *messages.IOSSessionStart) error {
|
||||
log.Printf("handle ios session %d start: %v", sessionID, e)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSSessionEnd(sessionID uint64, e *messages.IOSSessionEnd) error {
|
||||
log.Printf("handle ios session %d end: %v", sessionID, e)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSCustomEvent(session *sessions.Session, e *messages.IOSCustomEvent) error {
|
||||
if err := conn.InsertCustomEvent(session.SessionID, e.Timestamp, truncSqIdx(e.Index), e.Name, e.Payload); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.InsertAutocompleteValue(session.SessionID, session.ProjectID, "CUSTOM_IOS", e.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSNetworkCall(sess *sessions.Session, e *messages.IOSNetworkCall) error {
|
||||
err := conn.InsertRequest(sess.SessionID, e.Timestamp, truncSqIdx(e.Index), e.URL, e.Duration, e.Status < 400)
|
||||
if err == nil {
|
||||
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "REQUEST_IOS", url.DiscardURLQuery(e.URL))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: add title column to views table and view_type (screen_enter, screen_leave)
|
||||
func (conn *Conn) InsertIOSScreenEnter(sess *sessions.Session, screenEnter *messages.IOSScreenEnter) error {
|
||||
if err := conn.Pool.Exec(`
|
||||
INSERT INTO events_ios.views (
|
||||
session_id, timestamp, seq_index, name
|
||||
) VALUES (
|
||||
$1, $2, $3, $4
|
||||
)`,
|
||||
sess.SessionID, screenEnter.Timestamp, screenEnter.Index, screenEnter.ViewName,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "VIEW_IOS", screenEnter.ViewName)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSScreenLeave(sess *sessions.Session, screenLeave *messages.IOSScreenLeave) error {
|
||||
if err := conn.Pool.Exec(`
|
||||
INSERT INTO events_ios.views (
|
||||
session_id, timestamp, seq_index, name
|
||||
) VALUES (
|
||||
$1, $2, $3, $4
|
||||
)`,
|
||||
sess.SessionID, screenLeave.Timestamp, screenLeave.Index, screenLeave.ViewName,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSClickEvent(sess *sessions.Session, clickEvent *messages.IOSClickEvent) error {
|
||||
if err := conn.Pool.Exec(`
|
||||
INSERT INTO events_ios.taps (
|
||||
session_id, timestamp, seq_index, label
|
||||
) VALUES (
|
||||
$1, $2, $3, $4
|
||||
)`,
|
||||
sess.SessionID, clickEvent.Timestamp, clickEvent.Index, clickEvent.Label,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "CLICK_IOS", clickEvent.Label)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSSwipeEvent(sess *sessions.Session, swipeEvent *messages.IOSSwipeEvent) error {
|
||||
if err := conn.Pool.Exec(`
|
||||
INSERT INTO events_ios.swipes (
|
||||
session_id, timestamp, seq_index, label, direction
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, $5
|
||||
)`,
|
||||
sess.SessionID, swipeEvent.Timestamp, swipeEvent.Index, swipeEvent.Label, swipeEvent.Direction,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "SWIPE_IOS", swipeEvent.Label)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSInputEvent(sess *sessions.Session, inputEvent *messages.IOSInputEvent) error {
|
||||
if err := conn.Pool.Exec(`
|
||||
INSERT INTO events_ios.inputs (
|
||||
session_id, timestamp, seq_index, label
|
||||
) VALUES (
|
||||
$1, $2, $3, $4
|
||||
)`,
|
||||
sess.SessionID, inputEvent.Timestamp, inputEvent.Index, inputEvent.Label,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.InsertAutocompleteValue(sess.SessionID, sess.ProjectID, "INPUT_IOS", inputEvent.Label)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSCrash(sessionID uint64, projectID uint32, crash *messages.IOSCrash) error {
|
||||
crashID := hashid.IOSCrashID(projectID, crash)
|
||||
|
||||
if err := conn.Pool.Exec(`
|
||||
INSERT INTO public.crashes_ios (
|
||||
project_id, crash_ios_id, name, reason, stacktrace
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, $5
|
||||
) ON CONFLICT DO NOTHING`,
|
||||
projectID, crashID, crash.Name, crash.Reason, crash.Stacktrace,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := conn.Pool.Exec(`
|
||||
INSERT INTO events_common.crashes (
|
||||
session_id, timestamp, seq_index, crash_ios_id
|
||||
) VALUES (
|
||||
$1, $2, $3, $4
|
||||
)`,
|
||||
sessionID, crash.Timestamp, crash.Index, crashID,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSIssueEvent(sess *sessions.Session, e *messages.IOSIssueEvent) error {
|
||||
issueID := hashid.IOSIssueID(sess.ProjectID, e)
|
||||
payload := &e.Payload
|
||||
if *payload == "" || *payload == "{}" {
|
||||
payload = nil
|
||||
}
|
||||
|
||||
if err := conn.bulks.Get("webIssues").Append(sess.ProjectID, issueID, e.Type, e.ContextString); err != nil {
|
||||
log.Printf("insert web issue err: %s", err)
|
||||
}
|
||||
if err := conn.bulks.Get("webIssueEvents").Append(sess.SessionID, issueID, e.Timestamp, truncSqIdx(e.Index), payload); err != nil {
|
||||
log.Printf("insert web issue event err: %s", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type IOSCrash struct {
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
Name string `json:"name"`
|
||||
Reason string `json:"reason"`
|
||||
Stacktrace string `json:"stacktrace"`
|
||||
}
|
||||
|
||||
type WebCrash struct {
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
}
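These helpers introduce several new autocomplete value types as bare string literals (USERID_IOS, USERANONYMOUSID_IOS, CUSTOM_IOS, REQUEST_IOS, VIEW_IOS, CLICK_IOS, SWIPE_IOS, INPUT_IOS). A small sketch of collecting them as named constants, purely a suggestion and not part of this PR, which would keep the spelling consistent between saver.go and mobile.go:

package main

import "fmt"

// Autocomplete value types used by the mobile handlers in this PR,
// gathered in one place instead of repeated string literals.
const (
	AutocompleteUserIDIOS          = "USERID_IOS"
	AutocompleteUserAnonymousIDIOS = "USERANONYMOUSID_IOS"
	AutocompleteCustomIOS          = "CUSTOM_IOS"
	AutocompleteRequestIOS         = "REQUEST_IOS"
	AutocompleteViewIOS            = "VIEW_IOS"
	AutocompleteClickIOS           = "CLICK_IOS"
	AutocompleteSwipeIOS           = "SWIPE_IOS"
	AutocompleteInputIOS           = "INPUT_IOS"
)

func main() {
	fmt.Println(AutocompleteClickIOS)
}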
|
||||
|
@@ -1,21 +1,16 @@
package ios

import (
"encoding/json"
"log"
"openreplay/backend/pkg/handlers"
"openreplay/backend/pkg/handlers/web"
. "openreplay/backend/pkg/messages"
)

/*
Handler name: ClickRage
Input events: IOSClickEvent,
IOSSessionEnd
Output event: IOSIssueEvent
*/
const TapTimeDiff = 300
const MinTapsInARow = 3

const CLICK_TIME_DIFF = 200

type ClickRageDetector struct {
type TapRageDetector struct {
handlers.ReadyMessageStore
lastTimestamp uint64
lastLabel string

@@ -24,11 +19,39 @@ type ClickRageDetector struct {
countsInARow int
}

func (h *ClickRageDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
func (h *TapRageDetector) createPayload() string {
p, err := json.Marshal(struct{ Count int }{h.countsInARow})
if err != nil {
log.Printf("can't marshal TapRage payload to json: %s", err)
return ""
}
return string(p)
}

func (h *TapRageDetector) Build() Message {
if h.countsInARow >= MinTapsInARow {
event := &IOSIssueEvent{
Type: "tap_rage",
ContextString: h.lastLabel,
Timestamp: h.firstInARawTimestamp,
Payload: h.createPayload(),
}
event.Index = h.firstInARawSeqIndex // Associated Index/ MessageID ?
return event
}
h.lastTimestamp = 0
h.lastLabel = ""
h.firstInARawTimestamp = 0
h.firstInARawSeqIndex = 0
h.countsInARow = 0
return nil
}

func (h *TapRageDetector) Handle(message Message, timestamp uint64) Message {
var event Message = nil
switch m := message.(type) {
case *IOSClickEvent:
if h.lastTimestamp+CLICK_TIME_DIFF < m.Timestamp && h.lastLabel == m.Label {
if h.lastTimestamp+TapTimeDiff < m.Timestamp && h.lastLabel == m.Label {
h.lastTimestamp = m.Timestamp
h.countsInARow += 1
return nil

@@ -46,21 +69,3 @@ func (h *ClickRageDetector) Handle(message Message, messageID uint64, timestamp
}
return event
}

func (h *ClickRageDetector) Build() Message {
if h.countsInARow >= web.MinClicksInARow {
event := &IOSIssueEvent{
Type: "click_rage",
ContextString: h.lastLabel,
}
event.Timestamp = h.firstInARawTimestamp
event.Index = h.firstInARawSeqIndex // Associated Index/ MessageID ?
return event
}
h.lastTimestamp = 0
h.lastLabel = ""
h.firstInARawTimestamp = 0
h.firstInARawSeqIndex = 0
h.countsInARow = 0
return nil
}
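A hedged usage sketch for the detector above; the heuristics runner that feeds it is not shown in this diff, so the surrounding loop and the out slice are illustrative only.

det := &TapRageDetector{}
var out []Message
for _, msg := range sessionMessages { // sessionMessages: []Message, hypothetical
    if issue := det.Handle(msg, msg.Meta().Timestamp); issue != nil {
        out = append(out, issue)
    }
}
if issue := det.Build(); issue != nil { // flush the open tap streak, e.g. on IOSSessionEnd
    out = append(out, issue)
}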
@@ -16,6 +16,13 @@ func IssueID(projectID uint32, e *messages.IssueEvent) string {
return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
}

func IOSIssueID(projectID uint32, e *messages.IOSIssueEvent) string {
hash := fnv.New128a()
hash.Write([]byte(e.Type))
hash.Write([]byte(e.ContextString))
return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
}

func IOSCrashID(projectID uint32, crash *messages.IOSCrash) string {
hash := fnv.New128a()
hash.Write([]byte(crash.Name))
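The point of these IDs is determinism; a small illustrative check (values invented):

a := hashid.IOSIssueID(42, &messages.IOSIssueEvent{Type: "tap_rage", ContextString: "LoginButton"})
b := hashid.IOSIssueID(42, &messages.IOSIssueEvent{Type: "tap_rage", ContextString: "LoginButton"})
fmt.Println(a == b) // true: the same project, type and context string always hash to the same ID,
// which is what lets the crash/issue inserts rely on ON CONFLICT DO NOTHING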
@@ -2,13 +2,13 @@
package messages

func IsReplayerType(id int) bool {
return 1 != id && 3 != id && 17 != id && 23 != id && 24 != id && 25 != id && 26 != id && 27 != id && 28 != id && 29 != id && 30 != id && 31 != id && 32 != id && 42 != id && 56 != id && 62 != id && 63 != id && 64 != id && 66 != id && 78 != id && 80 != id && 81 != id && 82 != id && 112 != id && 115 != id && 125 != id && 126 != id && 127 != id && 107 != id && 91 != id && 92 != id && 94 != id && 95 != id && 97 != id && 98 != id && 99 != id && 101 != id && 104 != id && 110 != id && 111 != id
return 1 != id && 3 != id && 17 != id && 23 != id && 24 != id && 25 != id && 26 != id && 27 != id && 28 != id && 29 != id && 30 != id && 31 != id && 32 != id && 42 != id && 56 != id && 62 != id && 63 != id && 64 != id && 66 != id && 78 != id && 80 != id && 81 != id && 82 != id && 112 != id && 115 != id && 125 != id && 126 != id && 127 != id && 90 != id && 91 != id && 92 != id && 94 != id && 95 != id && 97 != id && 98 != id && 104 != id && 107 != id && 110 != id && 111 != id
}

func IsIOSType(id int) bool {
return 107 == id || 90 == id || 91 == id || 92 == id || 93 == id || 94 == id || 95 == id || 96 == id || 97 == id || 98 == id || 99 == id || 100 == id || 101 == id || 102 == id || 103 == id || 104 == id || 105 == id || 110 == id || 111 == id
return 90 == id || 91 == id || 92 == id || 93 == id || 94 == id || 95 == id || 96 == id || 97 == id || 98 == id || 100 == id || 101 == id || 102 == id || 103 == id || 104 == id || 105 == id || 106 == id || 107 == id || 110 == id || 111 == id
}

func IsDOMType(id int) bool {
return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 37 == id || 38 == id || 49 == id || 50 == id || 51 == id || 54 == id || 55 == id || 57 == id || 58 == id || 59 == id || 60 == id || 61 == id || 67 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 113 == id || 114 == id || 117 == id || 118 == id || 90 == id || 93 == id || 96 == id || 100 == id || 102 == id || 103 == id || 105 == id
return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 37 == id || 38 == id || 49 == id || 50 == id || 51 == id || 54 == id || 55 == id || 57 == id || 58 == id || 59 == id || 60 == id || 61 == id || 67 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 113 == id || 114 == id || 117 == id || 118 == id || 93 == id || 96 == id || 100 == id || 101 == id || 102 == id || 103 == id || 105 == id || 106 == id
}
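These hand-maintained predicates are easy to get out of sync; a hedged test sketch for the new numbering, assuming it would sit in the same messages package:

package messages

import "testing"

func TestIOSSwipeRouting(t *testing.T) {
    if !IsIOSType(106) {
        t.Error("IOSSwipeEvent (106) should be recognised as an iOS message")
    }
    if !IsDOMType(106) {
        t.Error("IOSSwipeEvent (106) should also reach the DOM/replay consumers")
    }
    if IsIOSType(99) {
        t.Error("type 99 (removed IOSScreenLeave) should no longer be treated as iOS")
    }
}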
@@ -4,9 +4,6 @@ package messages
func GetTimestamp(message Message) uint64 {
switch msg := message.(type) {

case *IOSBatchMeta:
return msg.Timestamp

case *IOSSessionStart:
return msg.Timestamp

@@ -31,10 +28,7 @@ func GetTimestamp(message Message) uint64 {
case *IOSCrash:
return msg.Timestamp

case *IOSScreenEnter:
return msg.Timestamp

case *IOSScreenLeave:
case *IOSViewComponentEvent:
return msg.Timestamp

case *IOSClickEvent:

@@ -55,6 +49,12 @@ func GetTimestamp(message Message) uint64 {
case *IOSNetworkCall:
return msg.Timestamp

case *IOSSwipeEvent:
return msg.Timestamp

case *IOSBatchMeta:
return msg.Timestamp

case *IOSIssueEvent:
return msg.Timestamp
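A quick illustration of the contract (value invented): every iOS message reports its own timestamp, which the iterator then copies into Meta().

ts := GetTimestamp(&IOSSwipeEvent{Timestamp: 1700000000000})
fmt.Println(ts) // 1700000000000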
backend/pkg/messages/iterator-images.go (new file, 24 lines)

@@ -0,0 +1,24 @@
package messages

type imagesIteratorImpl struct {
coreIterator MessageIterator
handler ImageMessageHandler
}

type ImageMessageHandler func(data []byte, sessID uint64)

func NewImagesMessageIterator(messageHandler ImageMessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
enderIter := &imagesIteratorImpl{
handler: messageHandler,
}
//enderIter.coreIterator = NewMessageIterator(enderIter.handle, messageFilter, autoDecode)
return enderIter
}

func (e *imagesIteratorImpl) handle(message Message) {
//
}

func (e *imagesIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
e.handler(batchData, batchInfo.sessionID)
}
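A hedged sketch of how the new image-storage service might plug into this iterator; the consumer loop and saveArchive are assumptions, not code from this PR.

iter := messages.NewImagesMessageIterator(func(data []byte, sessID uint64) {
    // data is the raw tar.gz payload for one session's screenshot batch
    if err := saveArchive(sessID, data); err != nil { // saveArchive: hypothetical
        log.Printf("can't save archive for session %d: %s", sessID, err)
    }
}, nil, false)
// batchData and batchInfo come from the queue consumer for the raw-images topic
iter.Iterate(batchData, batchInfo)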
@@ -43,6 +43,7 @@ func NewMessageIterator(messageHandler MessageHandler, messageFilter []int, auto
iter.preFilter = map[int]struct{}{
MsgBatchMetadata: {}, MsgBatchMeta: {}, MsgTimestamp: {},
MsgSessionStart: {}, MsgSessionEnd: {}, MsgSetPageLocation: {},
MsgIOSBatchMeta: {},
}
return iter
}

@@ -110,7 +111,8 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {

// Update timestamp value for iOS message types
if IsIOSType(msgType) {
msg.Meta().Timestamp = i.getIOSTimestamp(msg)
msgTime := i.getIOSTimestamp(msg)
msg.Meta().Timestamp = msgTime
}

// Process message

@@ -184,6 +186,16 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
i.messageInfo.Url = m.URL
// Save session page url in cache for using in next batches
i.urls.Set(i.messageInfo.batch.sessionID, m.URL)

case *IOSBatchMeta:
if i.messageInfo.Index > 1 { // Might be several 0-0 BatchMeta in a row without an error though
return fmt.Errorf("batchMeta found at the end of the batch, info: %s", i.batchInfo.Info())
}
i.messageInfo.Index = m.FirstIndex
i.messageInfo.Timestamp = m.Timestamp
if m.Timestamp == 0 {
i.zeroTsLog("IOSBatchMeta")
}
}
return nil
}
@@ -86,7 +86,6 @@ const (
MsgIssueEvent = 125
MsgSessionEnd = 126
MsgSessionSearch = 127
MsgIOSBatchMeta = 107
MsgIOSSessionStart = 90
MsgIOSSessionEnd = 91
MsgIOSMetadata = 92

@@ -95,18 +94,20 @@ const (
MsgIOSUserAnonymousID = 95
MsgIOSScreenChanges = 96
MsgIOSCrash = 97
MsgIOSScreenEnter = 98
MsgIOSScreenLeave = 99
MsgIOSViewComponentEvent = 98
MsgIOSClickEvent = 100
MsgIOSInputEvent = 101
MsgIOSPerformanceEvent = 102
MsgIOSLog = 103
MsgIOSInternalError = 104
MsgIOSNetworkCall = 105
MsgIOSSwipeEvent = 106
MsgIOSBatchMeta = 107
MsgIOSPerformanceAggregated = 110
MsgIOSIssueEvent = 111
)

type Timestamp struct {
message
Timestamp uint64

@@ -273,6 +274,7 @@ func (msg *SetViewportScroll) TypeID() int {

type CreateDocument struct {
message

}

func (msg *CreateDocument) Encode() []byte {

@@ -2286,31 +2288,6 @@ func (msg *SessionSearch) TypeID() int {
return 127
}

type IOSBatchMeta struct {
message
Timestamp uint64
Length uint64
FirstIndex uint64
}

func (msg *IOSBatchMeta) Encode() []byte {
buf := make([]byte, 31)
buf[0] = 107
p := 1
p = WriteUint(msg.Timestamp, buf, p)
p = WriteUint(msg.Length, buf, p)
p = WriteUint(msg.FirstIndex, buf, p)
return buf[:p]
}

func (msg *IOSBatchMeta) Decode() Message {
return msg
}

func (msg *IOSBatchMeta) TypeID() int {
return 107
}

type IOSSessionStart struct {
message
Timestamp uint64

@@ -2535,60 +2512,35 @@ func (msg *IOSCrash) TypeID() int {
return 97
}

type IOSScreenEnter struct {
type IOSViewComponentEvent struct {
message
Timestamp uint64
Length uint64
Title string
ScreenName string
ViewName string
Visible bool
}

func (msg *IOSScreenEnter) Encode() []byte {
buf := make([]byte, 41+len(msg.Title)+len(msg.ViewName))
func (msg *IOSViewComponentEvent) Encode() []byte {
buf := make([]byte, 51+len(msg.ScreenName)+len(msg.ViewName))
buf[0] = 98
p := 1
p = WriteUint(msg.Timestamp, buf, p)
p = WriteUint(msg.Length, buf, p)
p = WriteString(msg.Title, buf, p)
p = WriteString(msg.ScreenName, buf, p)
p = WriteString(msg.ViewName, buf, p)
p = WriteBoolean(msg.Visible, buf, p)
return buf[:p]
}

func (msg *IOSScreenEnter) Decode() Message {
func (msg *IOSViewComponentEvent) Decode() Message {
return msg
}

func (msg *IOSScreenEnter) TypeID() int {
func (msg *IOSViewComponentEvent) TypeID() int {
return 98
}

type IOSScreenLeave struct {
message
Timestamp uint64
Length uint64
Title string
ViewName string
}

func (msg *IOSScreenLeave) Encode() []byte {
buf := make([]byte, 41+len(msg.Title)+len(msg.ViewName))
buf[0] = 99
p := 1
p = WriteUint(msg.Timestamp, buf, p)
p = WriteUint(msg.Length, buf, p)
p = WriteString(msg.Title, buf, p)
p = WriteString(msg.ViewName, buf, p)
return buf[:p]
}

func (msg *IOSScreenLeave) Decode() Message {
return msg
}

func (msg *IOSScreenLeave) TypeID() int {
return 99
}

type IOSClickEvent struct {
message
Timestamp uint64

@@ -2730,28 +2682,28 @@ type IOSNetworkCall struct {
message
Timestamp uint64
Length uint64
Duration uint64
Headers string
Body string
URL string
Success bool
Type string
Method string
URL string
Request string
Response string
Status uint64
Duration uint64
}

func (msg *IOSNetworkCall) Encode() []byte {
buf := make([]byte, 91+len(msg.Headers)+len(msg.Body)+len(msg.URL)+len(msg.Method))
buf := make([]byte, 91+len(msg.Type)+len(msg.Method)+len(msg.URL)+len(msg.Request)+len(msg.Response))
buf[0] = 105
p := 1
p = WriteUint(msg.Timestamp, buf, p)
p = WriteUint(msg.Length, buf, p)
p = WriteUint(msg.Duration, buf, p)
p = WriteString(msg.Headers, buf, p)
p = WriteString(msg.Body, buf, p)
p = WriteString(msg.URL, buf, p)
p = WriteBoolean(msg.Success, buf, p)
p = WriteString(msg.Type, buf, p)
p = WriteString(msg.Method, buf, p)
p = WriteString(msg.URL, buf, p)
p = WriteString(msg.Request, buf, p)
p = WriteString(msg.Response, buf, p)
p = WriteUint(msg.Status, buf, p)
p = WriteUint(msg.Duration, buf, p)
return buf[:p]
}

@@ -2763,6 +2715,62 @@ func (msg *IOSNetworkCall) TypeID() int {
return 105
}

type IOSSwipeEvent struct {
message
Timestamp uint64
Length uint64
Label string
X uint64
Y uint64
Direction string
}

func (msg *IOSSwipeEvent) Encode() []byte {
buf := make([]byte, 61+len(msg.Label)+len(msg.Direction))
buf[0] = 106
p := 1
p = WriteUint(msg.Timestamp, buf, p)
p = WriteUint(msg.Length, buf, p)
p = WriteString(msg.Label, buf, p)
p = WriteUint(msg.X, buf, p)
p = WriteUint(msg.Y, buf, p)
p = WriteString(msg.Direction, buf, p)
return buf[:p]
}

func (msg *IOSSwipeEvent) Decode() Message {
return msg
}

func (msg *IOSSwipeEvent) TypeID() int {
return 106
}

type IOSBatchMeta struct {
message
Timestamp uint64
Length uint64
FirstIndex uint64
}

func (msg *IOSBatchMeta) Encode() []byte {
buf := make([]byte, 31)
buf[0] = 107
p := 1
p = WriteUint(msg.Timestamp, buf, p)
p = WriteUint(msg.Length, buf, p)
p = WriteUint(msg.FirstIndex, buf, p)
return buf[:p]
}

func (msg *IOSBatchMeta) Decode() Message {
return msg
}

func (msg *IOSBatchMeta) TypeID() int {
return 107
}

type IOSPerformanceAggregated struct {
message
TimestampStart uint64

@@ -2838,3 +2846,4 @@ func (msg *IOSIssueEvent) Decode() Message {
func (msg *IOSIssueEvent) TypeID() int {
return 111
}

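A small round-trip illustration for the new swipe message (values invented); its decoder counterpart, DecodeIOSSwipeEvent, appears in the codec changes just below.

swipe := &IOSSwipeEvent{Timestamp: 1700000000000, Label: "ProductList", X: 120, Y: 480, Direction: "left"}
raw := swipe.Encode()
fmt.Println(raw[0]) // 106: the first byte is always the message type id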
@@ -1383,21 +1383,6 @@ func DecodeSessionSearch(reader BytesReader) (Message, error) {
return msg, err
}

func DecodeIOSBatchMeta(reader BytesReader) (Message, error) {
var err error = nil
msg := &IOSBatchMeta{}
if msg.Timestamp, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Length, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.FirstIndex, err = reader.ReadUint(); err != nil {
return nil, err
}
return msg, err
}

func DecodeIOSSessionStart(reader BytesReader) (Message, error) {
var err error = nil
msg := &IOSSessionStart{}

@@ -1554,37 +1539,22 @@ func DecodeIOSCrash(reader BytesReader) (Message, error) {
return msg, err
}

func DecodeIOSScreenEnter(reader BytesReader) (Message, error) {
func DecodeIOSViewComponentEvent(reader BytesReader) (Message, error) {
var err error = nil
msg := &IOSScreenEnter{}
msg := &IOSViewComponentEvent{}
if msg.Timestamp, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Length, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Title, err = reader.ReadString(); err != nil {
if msg.ScreenName, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.ViewName, err = reader.ReadString(); err != nil {
return nil, err
}
return msg, err
}

func DecodeIOSScreenLeave(reader BytesReader) (Message, error) {
var err error = nil
msg := &IOSScreenLeave{}
if msg.Timestamp, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Length, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Title, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.ViewName, err = reader.ReadString(); err != nil {
if msg.Visible, err = reader.ReadBoolean(); err != nil {
return nil, err
}
return msg, err

@@ -1692,27 +1662,66 @@ func DecodeIOSNetworkCall(reader BytesReader) (Message, error) {
if msg.Length, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Duration, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Headers, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.Body, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.URL, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.Success, err = reader.ReadBoolean(); err != nil {
if msg.Type, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.Method, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.URL, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.Request, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.Response, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.Status, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Duration, err = reader.ReadUint(); err != nil {
return nil, err
}
return msg, err
}

func DecodeIOSSwipeEvent(reader BytesReader) (Message, error) {
var err error = nil
msg := &IOSSwipeEvent{}
if msg.Timestamp, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Length, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Label, err = reader.ReadString(); err != nil {
return nil, err
}
if msg.X, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Y, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Direction, err = reader.ReadString(); err != nil {
return nil, err
}
return msg, err
}

func DecodeIOSBatchMeta(reader BytesReader) (Message, error) {
var err error = nil
msg := &IOSBatchMeta{}
if msg.Timestamp, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.Length, err = reader.ReadUint(); err != nil {
return nil, err
}
if msg.FirstIndex, err = reader.ReadUint(); err != nil {
return nil, err
}
return msg, err
}

@@ -1955,8 +1964,6 @@ func ReadMessage(t uint64, reader BytesReader) (Message, error) {
return DecodeSessionEnd(reader)
case 127:
return DecodeSessionSearch(reader)
case 107:
return DecodeIOSBatchMeta(reader)
case 90:
return DecodeIOSSessionStart(reader)
case 91:

@@ -1974,9 +1981,7 @@ func ReadMessage(t uint64, reader BytesReader) (Message, error) {
case 97:
return DecodeIOSCrash(reader)
case 98:
return DecodeIOSScreenEnter(reader)
case 99:
return DecodeIOSScreenLeave(reader)
return DecodeIOSViewComponentEvent(reader)
case 100:
return DecodeIOSClickEvent(reader)
case 101:

@@ -1989,6 +1994,10 @@ func ReadMessage(t uint64, reader BytesReader) (Message, error) {
return DecodeIOSInternalError(reader)
case 105:
return DecodeIOSNetworkCall(reader)
case 106:
return DecodeIOSSwipeEvent(reader)
case 107:
return DecodeIOSBatchMeta(reader)
case 110:
return DecodeIOSPerformanceAggregated(reader)
case 111:
|
|
backend/pkg/metrics/imagestorage/metrics.go (new file, 155 lines)

@@ -0,0 +1,155 @@
|
|||
package imagestorage
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"openreplay/backend/pkg/metrics/common"
|
||||
)
|
||||
|
||||
var storageSessionSize = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "session_size_bytes",
|
||||
Help: "A histogram displaying the size of each session file in bytes prior to any manipulation.",
|
||||
Buckets: common.DefaultSizeBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionSize(fileSize float64, fileType string) {
|
||||
storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
|
||||
}
|
||||
|
||||
var storageTotalSessions = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sessions_total",
|
||||
Help: "A counter displaying the total number of all processed sessions.",
|
||||
},
|
||||
)
|
||||
|
||||
func IncreaseStorageTotalSessions() {
|
||||
storageTotalSessions.Inc()
|
||||
}
|
||||
|
||||
var storageSkippedSessionSize = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "session_size_bytes",
|
||||
Help: "A histogram displaying the size of each skipped session file in bytes.",
|
||||
Buckets: common.DefaultSizeBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSkippedSessionSize(fileSize float64, fileType string) {
|
||||
storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
|
||||
}
|
||||
|
||||
var storageTotalSkippedSessions = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sessions_skipped_total",
|
||||
Help: "A counter displaying the total number of all skipped sessions because of the size limits.",
|
||||
},
|
||||
)
|
||||
|
||||
func IncreaseStorageTotalSkippedSessions() {
|
||||
storageTotalSkippedSessions.Inc()
|
||||
}
|
||||
|
||||
var storageSessionReadDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "read_duration_seconds",
|
||||
Help: "A histogram displaying the duration of reading for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionReadDuration(durMillis float64, fileType string) {
|
||||
storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionSortDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sort_duration_seconds",
|
||||
Help: "A histogram displaying the duration of sorting for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionSortDuration(durMillis float64, fileType string) {
|
||||
storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "encryption_duration_seconds",
|
||||
Help: "A histogram displaying the duration of encoding for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
|
||||
storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionCompressDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "compress_duration_seconds",
|
||||
Help: "A histogram displaying the duration of compressing for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionCompressDuration(durMillis float64, fileType string) {
|
||||
storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionUploadDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "upload_duration_seconds",
|
||||
Help: "A histogram displaying the duration of uploading to s3 for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionUploadDuration(durMillis float64, fileType string) {
|
||||
storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionCompressionRatio = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "compression_ratio",
|
||||
Help: "A histogram displaying the compression ratio of mob files for each session.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionCompressionRatio(ratio float64, fileType string) {
|
||||
storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
|
||||
}
|
||||
|
||||
func List() []prometheus.Collector {
|
||||
return []prometheus.Collector{
|
||||
storageSessionSize,
|
||||
storageTotalSessions,
|
||||
storageSessionReadDuration,
|
||||
storageSessionSortDuration,
|
||||
storageSessionEncryptionDuration,
|
||||
storageSessionCompressDuration,
|
||||
storageSessionUploadDuration,
|
||||
storageSessionCompressionRatio,
|
||||
}
|
||||
}
|
||||
backend/pkg/metrics/videostorage/metrics.go (new file, 155 lines)

@@ -0,0 +1,155 @@
|
|||
package videostorage
|
||||
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"openreplay/backend/pkg/metrics/common"
|
||||
)
|
||||
|
||||
var storageSessionSize = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "session_size_bytes",
|
||||
Help: "A histogram displaying the size of each session file in bytes prior to any manipulation.",
|
||||
Buckets: common.DefaultSizeBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionSize(fileSize float64, fileType string) {
|
||||
storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
|
||||
}
|
||||
|
||||
var storageTotalSessions = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sessions_total",
|
||||
Help: "A counter displaying the total number of all processed sessions.",
|
||||
},
|
||||
)
|
||||
|
||||
func IncreaseStorageTotalSessions() {
|
||||
storageTotalSessions.Inc()
|
||||
}
|
||||
|
||||
var storageSkippedSessionSize = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "session_size_bytes",
|
||||
Help: "A histogram displaying the size of each skipped session file in bytes.",
|
||||
Buckets: common.DefaultSizeBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSkippedSessionSize(fileSize float64, fileType string) {
|
||||
storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
|
||||
}
|
||||
|
||||
var storageTotalSkippedSessions = prometheus.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sessions_skipped_total",
|
||||
Help: "A counter displaying the total number of all skipped sessions because of the size limits.",
|
||||
},
|
||||
)
|
||||
|
||||
func IncreaseStorageTotalSkippedSessions() {
|
||||
storageTotalSkippedSessions.Inc()
|
||||
}
|
||||
|
||||
var storageSessionReadDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "read_duration_seconds",
|
||||
Help: "A histogram displaying the duration of reading for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionReadDuration(durMillis float64, fileType string) {
|
||||
storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionSortDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "sort_duration_seconds",
|
||||
Help: "A histogram displaying the duration of sorting for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionSortDuration(durMillis float64, fileType string) {
|
||||
storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "encryption_duration_seconds",
|
||||
Help: "A histogram displaying the duration of encoding for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
|
||||
storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionCompressDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "compress_duration_seconds",
|
||||
Help: "A histogram displaying the duration of compressing for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionCompressDuration(durMillis float64, fileType string) {
|
||||
storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionUploadDuration = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "upload_duration_seconds",
|
||||
Help: "A histogram displaying the duration of uploading to s3 for each session in seconds.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionUploadDuration(durMillis float64, fileType string) {
|
||||
storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
|
||||
}
|
||||
|
||||
var storageSessionCompressionRatio = prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Namespace: "storage",
|
||||
Name: "compression_ratio",
|
||||
Help: "A histogram displaying the compression ratio of mob files for each session.",
|
||||
Buckets: common.DefaultDurationBuckets,
|
||||
},
|
||||
[]string{"file_type"},
|
||||
)
|
||||
|
||||
func RecordSessionCompressionRatio(ratio float64, fileType string) {
|
||||
storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
|
||||
}
|
||||
|
||||
func List() []prometheus.Collector {
|
||||
return []prometheus.Collector{
|
||||
storageSessionSize,
|
||||
storageTotalSessions,
|
||||
storageSessionReadDuration,
|
||||
storageSessionSortDuration,
|
||||
storageSessionEncryptionDuration,
|
||||
storageSessionCompressDuration,
|
||||
storageSessionUploadDuration,
|
||||
storageSessionCompressionRatio,
|
||||
}
|
||||
}
|
||||
|
@@ -16,6 +16,7 @@ const (
type ObjectStorage interface {
Upload(reader io.Reader, key string, contentType string, compression CompressionType) error
Get(key string) (io.ReadCloser, error)
GetAll(key string) ([]io.ReadCloser, error)
Exists(key string) bool
GetCreationTime(key string) *time.Time
}
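A hedged sketch of the intended consumer of GetAll, a video-storage worker collecting every stored screenshot for a session before rendering a replay; the key layout and helper function are assumptions, written as if they lived in the same package as the interface.

func loadSessionImages(store ObjectStorage, sessionID uint64) ([]io.ReadCloser, error) {
    key := fmt.Sprintf("%d/", sessionID) // hypothetical prefix layout
    readers, err := store.GetAll(key)
    if err != nil {
        return nil, fmt.Errorf("can't load images for session %d: %w", sessionID, err)
    }
    return readers, nil
}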
@@ -4,6 +4,7 @@ import (
"crypto/tls"
"fmt"
"io"
"log"
"net/http"
"net/url"
"os"

@@ -102,6 +103,47 @@ func (s *storageImpl) Get(key string) (io.ReadCloser, error) {
return out.Body, nil
}

func (s *storageImpl) GetAll(key string) ([]io.ReadCloser, error) {
out, err := s.svc.GetObject(&s3.GetObjectInput{
Bucket: s.bucket,
Key: &key,
})
if err != nil {
return nil, err
}
return []io.ReadCloser{out.Body}, nil
}

func downloadS3Files(bucket, prefix string) {
sess := _session.Must(_session.NewSession(&aws.Config{
Region: aws.String("us-west-1"), // Change this to your region
}))
svc := s3.New(sess)

resp, err := svc.ListObjects(&s3.ListObjectsInput{Bucket: &bucket, Prefix: &prefix})
if err != nil {
log.Fatal(err)
}

for _, item := range resp.Contents {
file, err := os.Create(*item.Key)
if err != nil {
log.Fatal(err)
}
defer file.Close()

downloader := s3manager.NewDownloader(sess)
_, err = downloader.Download(file,
&s3.GetObjectInput{
Bucket: &bucket,
Key: item.Key,
})
if err != nil {
log.Fatal(err)
}
}
}

func (s *storageImpl) Exists(key string) bool {
_, err := s.svc.HeadObject(&s3.HeadObjectInput{
Bucket: s.bucket,
@@ -9,6 +9,7 @@ type Project struct {
SampleRate byte
SaveRequestPayloads bool
BeaconSize int64
Platform string
Metadata1 *string
Metadata2 *string
Metadata3 *string

@@ -58,3 +59,11 @@ func (p *Project) GetMetadataNo(key string) uint {
}
return 0
}

func (p *Project) IsMobile() bool {
return p.Platform == "ios" || p.Platform == "android"
}

func (p *Project) IsWeb() bool {
return p.Platform == "web"
}
@@ -3,14 +3,14 @@ package projects
func (c *projectsImpl) getProjectByKey(projectKey string) (*Project, error) {
p := &Project{ProjectKey: projectKey}
if err := c.db.QueryRow(`
SELECT project_id, max_session_duration, save_request_payloads, sample_rate, beacon_size,
SELECT project_id, max_session_duration, save_request_payloads, sample_rate, beacon_size, platform,
metadata_1, metadata_2, metadata_3, metadata_4, metadata_5,
metadata_6, metadata_7, metadata_8, metadata_9, metadata_10
FROM projects
WHERE project_key=$1 AND active = true
`,
projectKey,
).Scan(&p.ProjectID, &p.MaxSessionDuration, &p.SaveRequestPayloads, &p.SampleRate, &p.BeaconSize,
).Scan(&p.ProjectID, &p.MaxSessionDuration, &p.SaveRequestPayloads, &p.SampleRate, &p.BeaconSize, &p.Platform,
&p.Metadata1, &p.Metadata2, &p.Metadata3, &p.Metadata4, &p.Metadata5,
&p.Metadata6, &p.Metadata7, &p.Metadata8, &p.Metadata9, &p.Metadata10); err != nil {
return nil, err

@@ -21,14 +21,14 @@ func (c *projectsImpl) getProjectByKey(projectKey string) (*Project, error) {
func (c *projectsImpl) getProject(projectID uint32) (*Project, error) {
p := &Project{ProjectID: projectID}
if err := c.db.QueryRow(`
SELECT project_key, max_session_duration, save_request_payloads, sample_rate, beacon_size,
SELECT project_key, max_session_duration, save_request_payloads, sample_rate, beacon_size, platform,
metadata_1, metadata_2, metadata_3, metadata_4, metadata_5,
metadata_6, metadata_7, metadata_8, metadata_9, metadata_10
FROM projects
WHERE project_id=$1 AND active = true
`,
projectID,
).Scan(&p.ProjectKey, &p.MaxSessionDuration, &p.SaveRequestPayloads, &p.SampleRate, &p.BeaconSize,
).Scan(&p.ProjectKey, &p.MaxSessionDuration, &p.SaveRequestPayloads, &p.SampleRate, &p.BeaconSize, &p.Platform,
&p.Metadata1, &p.Metadata2, &p.Metadata3, &p.Metadata4, &p.Metadata5,
&p.Metadata6, &p.Metadata7, &p.Metadata8, &p.Metadata9, &p.Metadata10); err != nil {
return nil, err
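With platform now scanned into Project, downstream services can gate mobile-only behaviour on it; a hedged sketch in which the accessor name is an assumption:

proj, err := projectsStore.GetProject(projectID) // projectsStore: hypothetical wrapper over projectsImpl
if err != nil {
    return err
}
if proj.IsMobile() {
    // route the session through the iOS pipeline (mobile start response, image/video topics)
} else {
    // regular web session handling
}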
@@ -93,7 +93,7 @@ func (s *storageImpl) AddUnStarted(sess *UnStartedSession) error {

func (s *storageImpl) Get(sessionID uint64) (*Session, error) {
sess := &Session{SessionID: sessionID}
var revID, userOSVersion, userBrowserVersion, userState, userCity *string
var revID, userOSVersion, userBrowser, userBrowserVersion, userState, userCity *string
var issueTypes pgtype.EnumArray
if err := s.db.QueryRow(`
SELECT platform,

@@ -117,7 +117,7 @@ func (s *storageImpl) Get(sessionID uint64) (*Session, error) {
&revID, &sess.TrackerVersion,
&sess.UserID, &sess.UserAnonymousID, &sess.Referrer,
&sess.PagesCount, &sess.EventsCount, &sess.ErrorsCount, &issueTypes,
&sess.UserBrowser, &userBrowserVersion, &sess.IssueScore,
&userBrowser, &userBrowserVersion, &sess.IssueScore,
&sess.Metadata1, &sess.Metadata2, &sess.Metadata3, &sess.Metadata4, &sess.Metadata5,
&sess.Metadata6, &sess.Metadata7, &sess.Metadata8, &sess.Metadata9, &sess.Metadata10); err != nil {
return nil, err

@@ -125,6 +125,9 @@ func (s *storageImpl) Get(sessionID uint64) (*Session, error) {
if userOSVersion != nil {
sess.UserOSVersion = *userOSVersion
}
if userBrowser != nil {
sess.UserBrowser = *userBrowser
}
if userBrowserVersion != nil {
sess.UserBrowserVersion = *userBrowserVersion
}
@@ -3,6 +3,7 @@ package azure
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"log"

@@ -87,6 +88,10 @@ func (s *storageImpl) Get(key string) (io.ReadCloser, error) {
return io.NopCloser(bytes.NewReader(downloadedData.Bytes())), err
}

func (s *storageImpl) GetAll(key string) ([]io.ReadCloser, error) {
return nil, errors.New("not implemented")
}

func (s *storageImpl) Exists(key string) bool {
ctx := context.Background()
get, err := s.client.DownloadStream(ctx, s.container, key, nil)
|
|
|||
|
|
@ -71,7 +71,7 @@ class CreateDocument(Message):
|
|||
__id__ = 7
|
||||
|
||||
def __init__(self, ):
|
||||
pass
|
||||
|
||||
|
||||
|
||||
class CreateElementNode(Message):
|
||||
|
|
@ -759,6 +759,20 @@ class ResourceTiming(Message):
|
|||
self.cached = cached
|
||||
|
||||
|
||||
class TabChange(Message):
|
||||
__id__ = 117
|
||||
|
||||
def __init__(self, tab_id):
|
||||
self.tab_id = tab_id
|
||||
|
||||
|
||||
class TabData(Message):
|
||||
__id__ = 118
|
||||
|
||||
def __init__(self, tab_id):
|
||||
self.tab_id = tab_id
|
||||
|
||||
|
||||
class IssueEvent(Message):
|
||||
__id__ = 125
|
||||
|
||||
|
|
@ -788,15 +802,6 @@ class SessionSearch(Message):
|
|||
self.partition = partition
|
||||
|
||||
|
||||
class IOSBatchMeta(Message):
|
||||
__id__ = 107
|
||||
|
||||
def __init__(self, timestamp, length, first_index):
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.first_index = first_index
|
||||
|
||||
|
||||
class IOSSessionStart(Message):
|
||||
__id__ = 90
|
||||
|
||||
|
|
@ -881,24 +886,15 @@ class IOSCrash(Message):
|
|||
self.stacktrace = stacktrace
|
||||
|
||||
|
||||
class IOSScreenEnter(Message):
|
||||
class IOSViewComponentEvent(Message):
|
||||
__id__ = 98
|
||||
|
||||
def __init__(self, timestamp, length, title, view_name):
|
||||
def __init__(self, timestamp, length, screen_name, view_name, visible):
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.title = title
|
||||
self.view_name = view_name
|
||||
|
||||
|
||||
class IOSScreenLeave(Message):
|
||||
__id__ = 99
|
||||
|
||||
def __init__(self, timestamp, length, title, view_name):
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.title = title
|
||||
self.screen_name = screen_name
|
||||
self.view_name = view_name
|
||||
self.visible = visible
|
||||
|
||||
|
||||
class IOSClickEvent(Message):
|
||||
|
|
@ -955,16 +951,37 @@ class IOSInternalError(Message):
|
|||
class IOSNetworkCall(Message):
|
||||
__id__ = 105
|
||||
|
||||
def __init__(self, timestamp, length, duration, headers, body, url, success, method, status):
|
||||
def __init__(self, timestamp, length, type, method, url, request, response, status, duration):
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.duration = duration
|
||||
self.headers = headers
|
||||
self.body = body
|
||||
self.url = url
|
||||
self.success = success
|
||||
self.type = type
|
||||
self.method = method
|
||||
self.url = url
|
||||
self.request = request
|
||||
self.response = response
|
||||
self.status = status
|
||||
self.duration = duration
|
||||
|
||||
|
||||
class IOSSwipeEvent(Message):
|
||||
__id__ = 106
|
||||
|
||||
def __init__(self, timestamp, length, label, x, y, direction):
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.label = label
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.direction = direction
|
||||
|
||||
|
||||
class IOSBatchMeta(Message):
|
||||
__id__ = 107
|
||||
|
||||
def __init__(self, timestamp, length, first_index):
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.first_index = first_index
|
||||
|
||||
|
||||
class IOSPerformanceAggregated(Message):
|
||||
|
|
|
|||
|
|
@ -1121,6 +1121,24 @@ cdef class ResourceTiming(PyMessage):
|
|||
self.cached = cached
|
||||
|
||||
|
||||
cdef class TabChange(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public str tab_id
|
||||
|
||||
def __init__(self, str tab_id):
|
||||
self.__id__ = 117
|
||||
self.tab_id = tab_id
|
||||
|
||||
|
||||
cdef class TabData(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public str tab_id
|
||||
|
||||
def __init__(self, str tab_id):
|
||||
self.__id__ = 118
|
||||
self.tab_id = tab_id
|
||||
|
||||
|
||||
cdef class IssueEvent(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public unsigned long message_id
|
||||
|
|
@ -1164,19 +1182,6 @@ cdef class SessionSearch(PyMessage):
|
|||
self.partition = partition
|
||||
|
||||
|
||||
cdef class IOSBatchMeta(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public unsigned long timestamp
|
||||
cdef public unsigned long length
|
||||
cdef public unsigned long first_index
|
||||
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, unsigned long first_index):
|
||||
self.__id__ = 107
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.first_index = first_index
|
||||
|
||||
|
||||
cdef class IOSSessionStart(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public unsigned long timestamp
|
||||
|
|
@ -1305,34 +1310,21 @@ cdef class IOSCrash(PyMessage):
|
|||
self.stacktrace = stacktrace
|
||||
|
||||
|
||||
cdef class IOSScreenEnter(PyMessage):
|
||||
cdef class IOSViewComponentEvent(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public unsigned long timestamp
|
||||
cdef public unsigned long length
|
||||
cdef public str title
|
||||
cdef public str screen_name
|
||||
cdef public str view_name
|
||||
cdef public bint visible
|
||||
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, str title, str view_name):
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, str screen_name, str view_name, bint visible):
|
||||
self.__id__ = 98
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.title = title
|
||||
self.view_name = view_name
|
||||
|
||||
|
||||
cdef class IOSScreenLeave(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public unsigned long timestamp
|
||||
cdef public unsigned long length
|
||||
cdef public str title
|
||||
cdef public str view_name
|
||||
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, str title, str view_name):
|
||||
self.__id__ = 99
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.title = title
|
||||
self.screen_name = screen_name
|
||||
self.view_name = view_name
|
||||
self.visible = visible
|
||||
|
||||
|
||||
cdef class IOSClickEvent(PyMessage):
|
||||
|
|
@ -1416,25 +1408,57 @@ cdef class IOSNetworkCall(PyMessage):
|
|||
cdef public int __id__
|
||||
cdef public unsigned long timestamp
|
||||
cdef public unsigned long length
|
||||
cdef public unsigned long duration
|
||||
cdef public str headers
|
||||
cdef public str body
|
||||
cdef public str url
|
||||
cdef public bint success
|
||||
cdef public str type
|
||||
cdef public str method
|
||||
cdef public str url
|
||||
cdef public str request
|
||||
cdef public str response
|
||||
cdef public unsigned long status
|
||||
cdef public unsigned long duration
|
||||
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, unsigned long duration, str headers, str body, str url, bint success, str method, unsigned long status):
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, str type, str method, str url, str request, str response, unsigned long status, unsigned long duration):
|
||||
self.__id__ = 105
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.duration = duration
|
||||
self.headers = headers
|
||||
self.body = body
|
||||
self.url = url
|
||||
self.success = success
|
||||
self.type = type
|
||||
self.method = method
|
||||
self.url = url
|
||||
self.request = request
|
||||
self.response = response
|
||||
self.status = status
|
||||
self.duration = duration
|
||||
|
||||
|
||||
cdef class IOSSwipeEvent(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public unsigned long timestamp
|
||||
cdef public unsigned long length
|
||||
cdef public str label
|
||||
cdef public unsigned long x
|
||||
cdef public unsigned long y
|
||||
cdef public str direction
|
||||
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, str label, unsigned long x, unsigned long y, str direction):
|
||||
self.__id__ = 106
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.label = label
|
||||
self.x = x
|
||||
self.y = y
|
||||
self.direction = direction
|
||||
|
||||
|
||||
cdef class IOSBatchMeta(PyMessage):
|
||||
cdef public int __id__
|
||||
cdef public unsigned long timestamp
|
||||
cdef public unsigned long length
|
||||
cdef public unsigned long first_index
|
||||
|
||||
def __init__(self, unsigned long timestamp, unsigned long length, unsigned long first_index):
|
||||
self.__id__ = 107
|
||||
self.timestamp = timestamp
|
||||
self.length = length
|
||||
self.first_index = first_index
|
||||
|
||||
|
||||
cdef class IOSPerformanceAggregated(PyMessage):
|
||||
|
|
|
|||
|
|
@ -43,8 +43,7 @@ class MessageCodec(Codec):
|
|||
try:
|
||||
decoded = int.from_bytes(b, "little", signed=False)
|
||||
except Exception as e:
|
||||
print(f"Error while decoding message key (SessionID) from {b}\n{e}")
|
||||
raise e
|
||||
raise UnicodeDecodeError(f"Error while decoding message key (SessionID) from {b}\n{e}")
|
||||
return decoded
|
||||
|
||||
def decode_detailed(self, b: bytes) -> List[Message]:
|
||||
|
|
@ -722,13 +721,6 @@ class MessageCodec(Codec):
|
|||
partition=self.read_uint(reader)
|
||||
)
|
||||
|
||||
if message_id == 107:
|
||||
return IOSBatchMeta(
|
||||
timestamp=self.read_uint(reader),
|
||||
length=self.read_uint(reader),
|
||||
first_index=self.read_uint(reader)
|
||||
)
|
||||
|
||||
if message_id == 90:
|
||||
return IOSSessionStart(
|
||||
timestamp=self.read_uint(reader),
|
||||
|
|
@ -798,19 +790,12 @@ class MessageCodec(Codec):
|
|||
)
|
||||
|
||||
if message_id == 98:
|
||||
return IOSScreenEnter(
|
||||
return IOSViewComponentEvent(
|
||||
timestamp=self.read_uint(reader),
|
||||
length=self.read_uint(reader),
|
||||
title=self.read_string(reader),
|
||||
view_name=self.read_string(reader)
|
||||
)
|
||||
|
||||
if message_id == 99:
|
||||
return IOSScreenLeave(
|
||||
timestamp=self.read_uint(reader),
|
||||
length=self.read_uint(reader),
|
||||
title=self.read_string(reader),
|
||||
view_name=self.read_string(reader)
|
||||
screen_name=self.read_string(reader),
|
||||
view_name=self.read_string(reader),
|
||||
visible=self.read_boolean(reader)
|
||||
)
|
||||
|
||||
if message_id == 100:
|
||||
|
|
@ -858,13 +843,30 @@ class MessageCodec(Codec):
|
|||
return IOSNetworkCall(
|
||||
timestamp=self.read_uint(reader),
|
||||
length=self.read_uint(reader),
|
||||
duration=self.read_uint(reader),
|
||||
headers=self.read_string(reader),
|
||||
body=self.read_string(reader),
|
||||
url=self.read_string(reader),
|
||||
success=self.read_boolean(reader),
|
||||
type=self.read_string(reader),
|
||||
method=self.read_string(reader),
|
||||
status=self.read_uint(reader)
|
||||
url=self.read_string(reader),
|
||||
request=self.read_string(reader),
|
||||
response=self.read_string(reader),
|
||||
status=self.read_uint(reader),
|
||||
duration=self.read_uint(reader)
|
||||
)
|
||||
|
||||
if message_id == 106:
|
||||
return IOSSwipeEvent(
|
||||
timestamp=self.read_uint(reader),
|
||||
length=self.read_uint(reader),
|
||||
label=self.read_string(reader),
|
||||
x=self.read_uint(reader),
|
||||
y=self.read_uint(reader),
|
||||
direction=self.read_string(reader)
|
||||
)
|
||||
|
||||
if message_id == 107:
|
||||
return IOSBatchMeta(
|
||||
timestamp=self.read_uint(reader),
|
||||
length=self.read_uint(reader),
|
||||
first_index=self.read_uint(reader)
|
||||
)
|
||||
|
||||
if message_id == 110:
|
||||
|
|
|
|||
File diff suppressed because it is too large.
|
|
@ -689,32 +689,6 @@ export default class RawMessageReader extends PrimitiveReader {
|
|||
};
|
||||
}
|
||||
|
||||
case 90: {
|
||||
const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() }
|
||||
const projectID = this.readUint(); if (projectID === null) { return resetPointer() }
|
||||
const trackerVersion = this.readString(); if (trackerVersion === null) { return resetPointer() }
|
||||
const revID = this.readString(); if (revID === null) { return resetPointer() }
|
||||
const userUUID = this.readString(); if (userUUID === null) { return resetPointer() }
|
||||
const userOS = this.readString(); if (userOS === null) { return resetPointer() }
|
||||
const userOSVersion = this.readString(); if (userOSVersion === null) { return resetPointer() }
|
||||
const userDevice = this.readString(); if (userDevice === null) { return resetPointer() }
|
||||
const userDeviceType = this.readString(); if (userDeviceType === null) { return resetPointer() }
|
||||
const userCountry = this.readString(); if (userCountry === null) { return resetPointer() }
|
||||
return {
|
||||
tp: MType.IosSessionStart,
|
||||
timestamp,
|
||||
projectID,
|
||||
trackerVersion,
|
||||
revID,
|
||||
userUUID,
|
||||
userOS,
|
||||
userOSVersion,
|
||||
userDevice,
|
||||
userDeviceType,
|
||||
userCountry,
|
||||
};
|
||||
}
|
||||
|
||||
case 93: {
|
||||
const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() }
|
||||
const length = this.readUint(); if (length === null) { return resetPointer() }
|
||||
|
|
@ -763,6 +737,22 @@ export default class RawMessageReader extends PrimitiveReader {
|
|||
};
|
||||
}
|
||||
|
||||
case 101: {
|
||||
const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() }
|
||||
const length = this.readUint(); if (length === null) { return resetPointer() }
|
||||
const value = this.readString(); if (value === null) { return resetPointer() }
|
||||
const valueMasked = this.readBoolean(); if (valueMasked === null) { return resetPointer() }
|
||||
const label = this.readString(); if (label === null) { return resetPointer() }
|
||||
return {
|
||||
tp: MType.IosInputEvent,
|
||||
timestamp,
|
||||
length,
|
||||
value,
|
||||
valueMasked,
|
||||
label,
|
||||
};
|
||||
}
|
||||
|
||||
case 102: {
|
||||
const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() }
|
||||
const length = this.readUint(); if (length === null) { return resetPointer() }
|
||||
|
|
@ -794,24 +784,42 @@ export default class RawMessageReader extends PrimitiveReader {
|
|||
case 105: {
|
||||
const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() }
|
||||
const length = this.readUint(); if (length === null) { return resetPointer() }
|
||||
const duration = this.readUint(); if (duration === null) { return resetPointer() }
|
||||
const headers = this.readString(); if (headers === null) { return resetPointer() }
|
||||
const body = this.readString(); if (body === null) { return resetPointer() }
|
||||
const url = this.readString(); if (url === null) { return resetPointer() }
|
||||
const success = this.readBoolean(); if (success === null) { return resetPointer() }
|
||||
const type = this.readString(); if (type === null) { return resetPointer() }
|
||||
const method = this.readString(); if (method === null) { return resetPointer() }
|
||||
const url = this.readString(); if (url === null) { return resetPointer() }
|
||||
const request = this.readString(); if (request === null) { return resetPointer() }
|
||||
const response = this.readString(); if (response === null) { return resetPointer() }
|
||||
const status = this.readUint(); if (status === null) { return resetPointer() }
|
||||
const duration = this.readUint(); if (duration === null) { return resetPointer() }
|
||||
return {
|
||||
tp: MType.IosNetworkCall,
|
||||
timestamp,
|
||||
length,
|
||||
duration,
|
||||
headers,
|
||||
body,
|
||||
url,
|
||||
success,
|
||||
type,
|
||||
method,
|
||||
url,
|
||||
request,
|
||||
response,
|
||||
status,
|
||||
duration,
|
||||
};
|
||||
}
|
||||
|
||||
case 106: {
|
||||
const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() }
|
||||
const length = this.readUint(); if (length === null) { return resetPointer() }
|
||||
const label = this.readString(); if (label === null) { return resetPointer() }
|
||||
const x = this.readUint(); if (x === null) { return resetPointer() }
|
||||
const y = this.readUint(); if (y === null) { return resetPointer() }
|
||||
const direction = this.readString(); if (direction === null) { return resetPointer() }
|
||||
return {
|
||||
tp: MType.IosSwipeEvent,
|
||||
timestamp,
|
||||
length,
|
||||
label,
|
||||
x,
|
||||
y,
|
||||
direction,
|
||||
};
|
||||
}
|
||||
|
||||
|
|
@@ -3,7 +3,7 @@

import { MType } from './raw.gen'

const DOM_TYPES = [0,4,5,6,7,8,9,10,11,12,13,14,15,16,18,19,20,37,38,49,50,51,54,55,57,58,59,60,61,67,69,70,71,72,73,74,75,76,77,113,114,117,118,90,93,96,100,102,103,105]
const DOM_TYPES = [0,4,5,6,7,8,9,10,11,12,13,14,15,16,18,19,20,37,38,49,50,51,54,55,57,58,59,60,61,67,69,70,71,72,73,74,75,76,77,113,114,117,118,93,96,100,101,102,103,105,106]
export function isDOMType(t: MType) {
return DOM_TYPES.includes(t)
}
|
@ -60,13 +60,14 @@ import type {
|
|||
RawResourceTiming,
|
||||
RawTabChange,
|
||||
RawTabData,
|
||||
RawIosSessionStart,
|
||||
RawIosCustomEvent,
|
||||
RawIosScreenChanges,
|
||||
RawIosClickEvent,
|
||||
RawIosInputEvent,
|
||||
RawIosPerformanceEvent,
|
||||
RawIosLog,
|
||||
RawIosNetworkCall,
|
||||
RawIosSwipeEvent,
|
||||
} from './raw.gen'
|
||||
|
||||
export type Message = RawMessage & Timed
|
||||
|
|
@@ -184,17 +185,19 @@ export type TabChange = RawTabChange & Timed

export type TabData = RawTabData & Timed

export type IosSessionStart = RawIosSessionStart & Timed

export type IosCustomEvent = RawIosCustomEvent & Timed

export type IosScreenChanges = RawIosScreenChanges & Timed

export type IosClickEvent = RawIosClickEvent & Timed

export type IosInputEvent = RawIosInputEvent & Timed

export type IosPerformanceEvent = RawIosPerformanceEvent & Timed

export type IosLog = RawIosLog & Timed

export type IosNetworkCall = RawIosNetworkCall & Timed

export type IosSwipeEvent = RawIosSwipeEvent & Timed
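Each Ios* alias above is the corresponding Raw* payload intersected with Timed, i.e. the raw message plus a normalized playback time. A hedged sketch of how a raw message could be stamped; the shape of Timed is assumed to be { time: number } here and toTimed is a hypothetical helper, not part of the diff.

import type { RawMessage } from './raw.gen'

// Assumed shape of Timed for this sketch; the real type may carry more fields.
type Timed = { time: number }

// Hypothetical helper: attach a playback time (ms since session start) so web and
// mobile messages, e.g. IosSwipeEvent or IosNetworkCall, sort on a single axis.
function toTimed<T extends RawMessage>(raw: T, time: number): T & Timed {
  return { ...raw, time }
}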
@@ -58,13 +58,14 @@ export const enum MType {
  ResourceTiming = 116,
  TabChange = 117,
  TabData = 118,
  IosSessionStart = 90,
  IosCustomEvent = 93,
  IosScreenChanges = 96,
  IosClickEvent = 100,
  IosInputEvent = 101,
  IosPerformanceEvent = 102,
  IosLog = 103,
  IosNetworkCall = 105,
  IosSwipeEvent = 106,
}
@@ -459,20 +460,6 @@ export interface RawTabData {
  tabId: string,
}

export interface RawIosSessionStart {
  tp: MType.IosSessionStart,
  timestamp: number,
  projectID: number,
  trackerVersion: string,
  revID: string,
  userUUID: string,
  userOS: string,
  userOSVersion: string,
  userDevice: string,
  userDeviceType: string,
  userCountry: string,
}

export interface RawIosCustomEvent {
  tp: MType.IosCustomEvent,
  timestamp: number,
@@ -500,6 +487,15 @@ export interface RawIosClickEvent {
  y: number,
}

export interface RawIosInputEvent {
  tp: MType.IosInputEvent,
  timestamp: number,
  length: number,
  value: string,
  valueMasked: boolean,
  label: string,
}

export interface RawIosPerformanceEvent {
  tp: MType.IosPerformanceEvent,
  timestamp: number,
@@ -520,14 +516,24 @@ export interface RawIosNetworkCall {
  tp: MType.IosNetworkCall,
  timestamp: number,
  length: number,
  duration: number,
  headers: string,
  body: string,
  url: string,
  success: boolean,
  type: string,
  method: string,
  url: string,
  request: string,
  response: string,
  status: number,
  duration: number,
}

export interface RawIosSwipeEvent {
  tp: MType.IosSwipeEvent,
  timestamp: number,
  length: number,
  label: string,
  x: number,
  y: number,
  direction: string,
}


export type RawMessage = RawTimestamp | RawSetPageLocation | RawSetViewportSize | RawSetViewportScroll | RawCreateDocument | RawCreateElementNode | RawCreateTextNode | RawMoveNode | RawRemoveNode | RawSetNodeAttribute | RawRemoveNodeAttribute | RawSetNodeData | RawSetCssData | RawSetNodeScroll | RawSetInputValue | RawSetInputChecked | RawMouseMove | RawNetworkRequest | RawConsoleLog | RawCssInsertRule | RawCssDeleteRule | RawFetch | RawProfiler | RawOTable | RawRedux | RawVuex | RawMobX | RawNgRx | RawGraphQl | RawPerformanceTrack | RawStringDict | RawSetNodeAttributeDict | RawResourceTimingDeprecated | RawConnectionInformation | RawSetPageVisibility | RawLoadFontFace | RawSetNodeFocus | RawLongTask | RawSetNodeAttributeURLBased | RawSetCssDataURLBased | RawCssInsertRuleURLBased | RawMouseClick | RawCreateIFrameDocument | RawAdoptedSsReplaceURLBased | RawAdoptedSsReplace | RawAdoptedSsInsertRuleURLBased | RawAdoptedSsInsertRule | RawAdoptedSsDeleteRule | RawAdoptedSsAddOwner | RawAdoptedSsRemoveOwner | RawZustand | RawSelectionChange | RawMouseThrashing | RawResourceTiming | RawTabChange | RawTabData | RawIosSessionStart | RawIosCustomEvent | RawIosScreenChanges | RawIosClickEvent | RawIosPerformanceEvent | RawIosLog | RawIosNetworkCall;
export type RawMessage = RawTimestamp | RawSetPageLocation | RawSetViewportSize | RawSetViewportScroll | RawCreateDocument | RawCreateElementNode | RawCreateTextNode | RawMoveNode | RawRemoveNode | RawSetNodeAttribute | RawRemoveNodeAttribute | RawSetNodeData | RawSetCssData | RawSetNodeScroll | RawSetInputValue | RawSetInputChecked | RawMouseMove | RawNetworkRequest | RawConsoleLog | RawCssInsertRule | RawCssDeleteRule | RawFetch | RawProfiler | RawOTable | RawRedux | RawVuex | RawMobX | RawNgRx | RawGraphQl | RawPerformanceTrack | RawStringDict | RawSetNodeAttributeDict | RawResourceTimingDeprecated | RawConnectionInformation | RawSetPageVisibility | RawLoadFontFace | RawSetNodeFocus | RawLongTask | RawSetNodeAttributeURLBased | RawSetCssDataURLBased | RawCssInsertRuleURLBased | RawMouseClick | RawCreateIFrameDocument | RawAdoptedSsReplaceURLBased | RawAdoptedSsReplace | RawAdoptedSsInsertRuleURLBased | RawAdoptedSsInsertRule | RawAdoptedSsDeleteRule | RawAdoptedSsAddOwner | RawAdoptedSsRemoveOwner | RawZustand | RawSelectionChange | RawMouseThrashing | RawResourceTiming | RawTabChange | RawTabData | RawIosCustomEvent | RawIosScreenChanges | RawIosClickEvent | RawIosInputEvent | RawIosPerformanceEvent | RawIosLog | RawIosNetworkCall | RawIosSwipeEvent;
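Because every Raw* interface carries a literal tp discriminant from MType, the widened RawMessage union can be narrowed with an ordinary switch, and the two new mobile members behave like any web message. A hedged sketch of a consumer follows; dispatch, handleSwipe and handleNetworkCall are hypothetical names, the types come from the diff above.

import { MType } from './raw.gen'
import type { RawMessage, RawIosSwipeEvent, RawIosNetworkCall } from './raw.gen'

// Hypothetical consumers of the two new mobile message shapes.
function handleSwipe(m: RawIosSwipeEvent): void {
  console.log(`swipe ${m.direction} at (${m.x}, ${m.y}) on "${m.label}"`)
}

function handleNetworkCall(m: RawIosNetworkCall): void {
  console.log(`${m.method} ${m.url} -> ${m.status} in ${m.duration}ms`)
}

function dispatch(msg: RawMessage): void {
  switch (msg.tp) {
    case MType.IosSwipeEvent:
      handleSwipe(msg) // msg is narrowed to RawIosSwipeEvent here
      break
    case MType.IosNetworkCall:
      handleNetworkCall(msg)
      break
    default:
      break // all other message types are handled elsewhere
  }
}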
@@ -59,11 +59,12 @@ export const TP_MAP = {
  116: MType.ResourceTiming,
  117: MType.TabChange,
  118: MType.TabData,
  90: MType.IosSessionStart,
  93: MType.IosCustomEvent,
  96: MType.IosScreenChanges,
  100: MType.IosClickEvent,
  101: MType.IosInputEvent,
  102: MType.IosPerformanceEvent,
  103: MType.IosLog,
  105: MType.IosNetworkCall,
  106: MType.IosSwipeEvent,
} as const
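TP_MAP is the lookup from the numeric id read off the wire to the MType value used everywhere else, so ids this player build does not know about (for example a message produced by a newer mobile tracker) can be skipped before parsing. A small sketch; resolveType and the TP_MAP import path are hypothetical.

import { MType } from './raw.gen'
import { TP_MAP } from './tp-map' // hypothetical path for the table above

// Resolve a wire type id to an MType, or null when the id is unknown to this build.
function resolveType(wireId: number): MType | null {
  const tp = TP_MAP[wireId as keyof typeof TP_MAP]
  return tp ?? null
}

// resolveType(106) === MType.IosSwipeEvent, resolveType(999) === null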
@@ -1,26 +1,13 @@
message 107, 'IOSBatchMeta', :replayer => false do
message 90, 'IOSSessionStart', :replayer => false do
  uint 'Timestamp'
  uint 'Length'
  uint 'FirstIndex'
end

message 90, 'IOSSessionStart', :replayer => true do
  uint 'Timestamp'
  # uint 'Length'

  uint 'ProjectID'
  string 'TrackerVersion'
  string 'RevID'
  string 'UserUUID'
  # string 'UserAgent'
  string 'UserOS'
  string 'UserOSVersion'
  # string 'UserBrowser'
  # string 'UserBrowserVersion'
  string 'UserDevice'
  string 'UserDeviceType'
  # uint 'UserDeviceMemorySize'
  # uint 'UserDeviceHeapSize'
  string 'UserCountry'
end
@@ -71,18 +58,12 @@ message 97, 'IOSCrash' do
  string 'Stacktrace'
end

message 98, 'IOSScreenEnter' do
message 98, 'IOSViewComponentEvent' do
  uint 'Timestamp'
  uint 'Length'
  string 'Title'
  string 'ViewName'
end

message 99, 'IOSScreenLeave' do
  uint 'Timestamp'
  uint 'Length'
  string 'Title'
  string 'ScreenName'
  string 'ViewName'
  boolean 'Visible'
end

message 100, 'IOSClickEvent', :replayer => true do
@@ -93,7 +74,7 @@ message 100, 'IOSClickEvent', :replayer => true do
  uint 'Y'
end

message 101, 'IOSInputEvent' do
message 101, 'IOSInputEvent', :replayer => true do
  uint 'Timestamp'
  uint 'Length'
  string 'Value'
@@ -138,14 +119,30 @@ end
message 105, 'IOSNetworkCall', :replayer => true do
  uint 'Timestamp'
  uint 'Length'
  uint 'Duration'
  string 'Headers'
  string 'Body'
  string 'URL'
  boolean 'Success'
  string 'Type'
  string 'Method'
  string 'URL'
  string 'Request'
  string 'Response'
  uint 'Status'
  uint 'Duration'
end

message 106, 'IOSSwipeEvent', :replayer => true do
  uint 'Timestamp'
  uint 'Length'
  string 'Label'
  uint 'X'
  uint 'Y'
  string 'Direction'
end

message 107, 'IOSBatchMeta', :replayer => false do
  uint 'Timestamp'
  uint 'Length'
  uint 'FirstIndex'
end

message 110, 'IOSPerformanceAggregated', :swift => false do
  uint 'TimestampStart'
  uint 'TimestampEnd'
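The IOSNetworkCall listing above carries both the older field set (Duration, Headers, Body, URL, Success) and the newer one (Type, Method, URL, Request, Response, Status, Duration); once the boolean Success flag is no longer written, whether a call succeeded has to be derived from the recorded status code by whoever consumes the message. One plausible, purely illustrative derivation in TypeScript; the 2xx/3xx rule and the helper name are assumptions, not the backend's actual logic.

import type { RawIosNetworkCall } from './raw.gen'

// Illustrative only: treat 2xx/3xx responses as successful. The real services may
// apply a different rule (for example also accounting for transport-level errors).
function isSuccessfulCall(call: Pick<RawIosNetworkCall, 'status'>): boolean {
  return call.status >= 200 && call.status < 400
}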
@@ -0,0 +1,23 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
scripts/helmcharts/openreplay/charts/imagestorage/Chart.yaml
|
|
@@ -0,0 +1,24 @@
|
|||
apiVersion: v2
|
||||
name: imagestorage
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
AppVersion: "v1.14.0"
|
||||
|
|
@@ -0,0 +1,22 @@
|
|||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "imagestorage.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "imagestorage.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "imagestorage.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "imagestorage.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,62 @@
|
|||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "imagestorage.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "imagestorage.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "imagestorage.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "imagestorage.labels" -}}
|
||||
helm.sh/chart: {{ include "imagestorage.chart" . }}
|
||||
{{ include "imagestorage.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "imagestorage.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "imagestorage.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "imagestorage.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "imagestorage.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,131 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "imagestorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "imagestorage.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "imagestorage.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "imagestorage.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "imagestorage.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
shareProcessNamespace: true
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.enterpriseEditionLicense }}
|
||||
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee"
|
||||
{{- else }}
|
||||
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.healthCheck}}
|
||||
{{- .Values.healthCheck | toYaml | nindent 10}}
|
||||
{{- end}}
|
||||
env:
|
||||
- name: AWS_ACCESS_KEY_ID
|
||||
{{- if .Values.global.s3.existingSecret }}
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.global.s3.existingSecret }}
|
||||
key: access-key
|
||||
{{- else }}
|
||||
value: {{ .Values.global.s3.accessKey }}
|
||||
{{- end }}
|
||||
- name: AWS_SECRET_ACCESS_KEY
|
||||
{{- if .Values.global.s3.existingSecret }}
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.global.s3.existingSecret }}
|
||||
key: secret-key
|
||||
{{- else }}
|
||||
value: {{ .Values.global.s3.secretKey }}
|
||||
{{- end }}
|
||||
- name: AWS_ENDPOINT
|
||||
value: '{{ .Values.global.s3.endpoint }}'
|
||||
- name: AWS_REGION
|
||||
value: '{{ .Values.global.s3.region }}'
|
||||
- name: BUCKET_NAME
|
||||
value: {{ .Values.global.s3.recordingsBucket }}
|
||||
- name: LICENSE_KEY
|
||||
value: '{{ .Values.global.enterpriseEditionLicense }}'
|
||||
- name: KAFKA_SERVERS
|
||||
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
|
||||
- name: KAFKA_USE_SSL
|
||||
value: '{{ .Values.global.kafka.kafkaUseSsl }}'
|
||||
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
|
||||
{{- range $key, $val := .Values.global.env }}
|
||||
- name: {{ $key }}
|
||||
value: '{{ $val }}'
|
||||
{{- end }}
|
||||
{{- range $key, $val := .Values.env }}
|
||||
- name: {{ $key }}
|
||||
value: '{{ $val }}'
|
||||
{{- end}}
|
||||
ports:
|
||||
{{- range $key, $val := .Values.service.ports }}
|
||||
- name: {{ $key }}
|
||||
containerPort: {{ $val }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: datadir
|
||||
mountPath: /mnt/efs
|
||||
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 10 }}
|
||||
{{- with .Values.persistence.mounts }}
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if eq (tpl .Values.pvc.name . ) "hostPath" }}
|
||||
volumes:
|
||||
{{- with .Values.persistence.volumes }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
- name: datadir
|
||||
hostPath:
|
||||
# Ensure the file directory is created.
|
||||
path: {{ .Values.pvc.hostMountPath }}
|
||||
type: DirectoryOrCreate
|
||||
{{- else }}
|
||||
volumes:
|
||||
{{- with .Values.persistence.volumes }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
- name: datadir
|
||||
persistentVolumeClaim:
|
||||
claimName: "{{ tpl .Values.pvc.name . }}"
|
||||
{{- end }}
|
||||
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,29 @@
|
|||
{{- if .Values.autoscaling.enabled }}
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ include "imagestorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "imagestorage.labels" . | nindent 4 }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ include "imagestorage.fullname" . }}
|
||||
minReplicas: {{ .Values.autoscaling.minReplicas }}
|
||||
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,62 @@
|
|||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "imagestorage.fullname" . -}}
|
||||
{{- $svcPort := .Values.service.ports.http -}}
|
||||
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
|
||||
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
|
||||
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "imagestorage.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||
ingressClassName: {{ .Values.ingress.className }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
|
||||
pathType: {{ .pathType }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $svcPort }}
|
||||
{{- else }}
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $svcPort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,18 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "imagestorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "imagestorage.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
{{- range $key, $val := .Values.service.ports }}
|
||||
- port: {{ $val }}
|
||||
targetPort: {{ $key }}
|
||||
protocol: TCP
|
||||
name: {{ $key }}
|
||||
{{- end}}
|
||||
selector:
|
||||
{{- include "imagestorage.selectorLabels" . | nindent 4 }}
|
||||
|
|
@@ -0,0 +1,18 @@
|
|||
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "imagestorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "imagestorage.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
{{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "imagestorage.selectorLabels" . | nindent 6 }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,13 @@
|
|||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "imagestorage.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "imagestorage.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,15 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ include "imagestorage.fullname" . }}-test-connection"
|
||||
labels:
|
||||
{{- include "imagestorage.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
containers:
|
||||
- name: wget
|
||||
image: busybox
|
||||
command: ['wget']
|
||||
args: ['{{ include "imagestorage.fullname" . }}:{{ .Values.service.port }}']
|
||||
restartPolicy: Never
|
||||
scripts/helmcharts/openreplay/charts/imagestorage/values.yaml
|
|
@@ -0,0 +1,124 @@
|
|||
# Default values for openreplay.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: "{{ .Values.global.openReplayContainerRegistry }}/imagestorage"
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: "imagestorage"
|
||||
fullnameOverride: "imagestorage-openreplay"
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
runAsGroup: 1001
|
||||
podSecurityContext:
|
||||
runAsUser: 1001
|
||||
runAsGroup: 1001
|
||||
fsGroup: 1001
|
||||
fsGroupChangePolicy: "OnRootMismatch"
|
||||
|
||||
#securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
http: 9000
|
||||
metrics: 8888
|
||||
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
additionalLabels:
|
||||
release: observability
|
||||
scrapeConfigs:
|
||||
- port: metrics
|
||||
honorLabels: true
|
||||
interval: 15s
|
||||
path: /metrics
|
||||
scheme: http
|
||||
scrapeTimeout: 10s
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: chart-example.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 5
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
env:
|
||||
FS_CLEAN_HRS: 24
|
||||
|
||||
pvc:
|
||||
# This can be either persistentVolumeClaim or hostPath.
|
||||
# In case of pvc, you'll have to provide the pvc name.
|
||||
# For example
|
||||
# name: openreplay-efs
|
||||
name: "{{ .Values.global.pvcRWXName }}"
|
||||
hostMountPath: /openreplay/storage/nfs
|
||||
|
||||
persistence: {}
|
||||
# # Spec of spec.template.spec.containers[*].volumeMounts
|
||||
# mounts:
|
||||
# - name: kafka-ssl
|
||||
# mountPath: /opt/kafka/ssl
|
||||
# # Spec of spec.template.spec.volumes
|
||||
# volumes:
|
||||
# - name: kafka-ssl
|
||||
# secret:
|
||||
# secretName: kafka-ssl
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
|
@@ -0,0 +1,23 @@
|
|||
# Patterns to ignore when building packages.
|
||||
# This supports shell glob matching, relative path matching, and
|
||||
# negation (prefixed with !). Only one pattern per line.
|
||||
.DS_Store
|
||||
# Common VCS dirs
|
||||
.git/
|
||||
.gitignore
|
||||
.bzr/
|
||||
.bzrignore
|
||||
.hg/
|
||||
.hgignore
|
||||
.svn/
|
||||
# Common backup files
|
||||
*.swp
|
||||
*.bak
|
||||
*.tmp
|
||||
*.orig
|
||||
*~
|
||||
# Various IDEs
|
||||
.project
|
||||
.idea/
|
||||
*.tmproj
|
||||
.vscode/
|
||||
scripts/helmcharts/openreplay/charts/videostorage/Chart.yaml
|
|
@@ -0,0 +1,24 @@
|
|||
apiVersion: v2
|
||||
name: videostorage
|
||||
description: A Helm chart for Kubernetes
|
||||
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
# Versions are expected to follow Semantic Versioning (https://semver.org/)
|
||||
version: 0.1.1
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application. Versions are not expected to
|
||||
# follow Semantic Versioning. They should reflect the version the application is using.
|
||||
# It is recommended to use it with quotes.
|
||||
AppVersion: "v1.14.0"
|
||||
|
|
@@ -0,0 +1,22 @@
|
|||
1. Get the application URL by running these commands:
|
||||
{{- if .Values.ingress.enabled }}
|
||||
{{- range $host := .Values.ingress.hosts }}
|
||||
{{- range .paths }}
|
||||
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- else if contains "NodePort" .Values.service.type }}
|
||||
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "videostorage.fullname" . }})
|
||||
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
|
||||
echo http://$NODE_IP:$NODE_PORT
|
||||
{{- else if contains "LoadBalancer" .Values.service.type }}
|
||||
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
|
||||
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "videostorage.fullname" . }}'
|
||||
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "videostorage.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
|
||||
echo http://$SERVICE_IP:{{ .Values.service.port }}
|
||||
{{- else if contains "ClusterIP" .Values.service.type }}
|
||||
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "videostorage.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
|
||||
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
|
||||
echo "Visit http://127.0.0.1:8080 to use your application"
|
||||
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,62 @@
|
|||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "videostorage.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "videostorage.fullname" -}}
|
||||
{{- if .Values.fullnameOverride }}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride }}
|
||||
{{- if contains $name .Release.Name }}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
|
||||
{{- else }}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "videostorage.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Common labels
|
||||
*/}}
|
||||
{{- define "videostorage.labels" -}}
|
||||
helm.sh/chart: {{ include "videostorage.chart" . }}
|
||||
{{ include "videostorage.selectorLabels" . }}
|
||||
{{- if .Chart.AppVersion }}
|
||||
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
|
||||
{{- end }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Selector labels
|
||||
*/}}
|
||||
{{- define "videostorage.selectorLabels" -}}
|
||||
app.kubernetes.io/name: {{ include "videostorage.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
{{- end }}
|
||||
|
||||
{{/*
|
||||
Create the name of the service account to use
|
||||
*/}}
|
||||
{{- define "videostorage.serviceAccountName" -}}
|
||||
{{- if .Values.serviceAccount.create }}
|
||||
{{- default (include "videostorage.fullname" .) .Values.serviceAccount.name }}
|
||||
{{- else }}
|
||||
{{- default "default" .Values.serviceAccount.name }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,131 @@
|
|||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {{ include "videostorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "videostorage.labels" . | nindent 4 }}
|
||||
spec:
|
||||
{{- if not .Values.autoscaling.enabled }}
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
{{- end }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "videostorage.selectorLabels" . | nindent 6 }}
|
||||
template:
|
||||
metadata:
|
||||
{{- with .Values.podAnnotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "videostorage.selectorLabels" . | nindent 8 }}
|
||||
spec:
|
||||
{{- with .Values.imagePullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
serviceAccountName: {{ include "videostorage.serviceAccountName" . }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.podSecurityContext | nindent 8 }}
|
||||
shareProcessNamespace: true
|
||||
containers:
|
||||
- name: {{ .Chart.Name }}
|
||||
securityContext:
|
||||
{{- toYaml .Values.securityContext | nindent 12 }}
|
||||
{{- if .Values.global.enterpriseEditionLicense }}
|
||||
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee"
|
||||
{{- else }}
|
||||
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
{{- end }}
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.healthCheck}}
|
||||
{{- .Values.healthCheck | toYaml | nindent 10}}
|
||||
{{- end}}
|
||||
env:
|
||||
- name: AWS_ACCESS_KEY_ID
|
||||
{{- if .Values.global.s3.existingSecret }}
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.global.s3.existingSecret }}
|
||||
key: access-key
|
||||
{{- else }}
|
||||
value: {{ .Values.global.s3.accessKey }}
|
||||
{{- end }}
|
||||
- name: AWS_SECRET_ACCESS_KEY
|
||||
{{- if .Values.global.s3.existingSecret }}
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: {{ .Values.global.s3.existingSecret }}
|
||||
key: secret-key
|
||||
{{- else }}
|
||||
value: {{ .Values.global.s3.secretKey }}
|
||||
{{- end }}
|
||||
- name: AWS_ENDPOINT
|
||||
value: '{{ .Values.global.s3.endpoint }}'
|
||||
- name: AWS_REGION
|
||||
value: '{{ .Values.global.s3.region }}'
|
||||
- name: BUCKET_NAME
|
||||
value: {{ .Values.global.s3.recordingsBucket }}
|
||||
- name: LICENSE_KEY
|
||||
value: '{{ .Values.global.enterpriseEditionLicense }}'
|
||||
- name: KAFKA_SERVERS
|
||||
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
|
||||
- name: KAFKA_USE_SSL
|
||||
value: '{{ .Values.global.kafka.kafkaUseSsl }}'
|
||||
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
|
||||
{{- range $key, $val := .Values.global.env }}
|
||||
- name: {{ $key }}
|
||||
value: '{{ $val }}'
|
||||
{{- end }}
|
||||
{{- range $key, $val := .Values.env }}
|
||||
- name: {{ $key }}
|
||||
value: '{{ $val }}'
|
||||
{{- end}}
|
||||
ports:
|
||||
{{- range $key, $val := .Values.service.ports }}
|
||||
- name: {{ $key }}
|
||||
containerPort: {{ $val }}
|
||||
protocol: TCP
|
||||
{{- end }}
|
||||
resources:
|
||||
{{- toYaml .Values.resources | nindent 12 }}
|
||||
volumeMounts:
|
||||
- name: datadir
|
||||
mountPath: /mnt/efs
|
||||
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 10 }}
|
||||
{{- with .Values.persistence.mounts }}
|
||||
{{- toYaml . | nindent 10 }}
|
||||
{{- end }}
|
||||
{{- if eq (tpl .Values.pvc.name . ) "hostPath" }}
|
||||
volumes:
|
||||
{{- with .Values.persistence.volumes }}
|
||||
{{- toYaml . | nindent 6 }}
|
||||
{{- end }}
|
||||
- name: datadir
|
||||
hostPath:
|
||||
# Ensure the file directory is created.
|
||||
path: {{ .Values.pvc.hostMountPath }}
|
||||
type: DirectoryOrCreate
|
||||
{{- else }}
|
||||
volumes:
|
||||
{{- with .Values.persistence.volumes }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
- name: datadir
|
||||
persistentVolumeClaim:
|
||||
claimName: "{{ tpl .Values.pvc.name . }}"
|
||||
{{- end }}
|
||||
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,29 @@
|
|||
{{- if .Values.autoscaling.enabled }}
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: {{ include "videostorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "videostorage.labels" . | nindent 4 }}
|
||||
spec:
|
||||
scaleTargetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: {{ include "videostorage.fullname" . }}
|
||||
minReplicas: {{ .Values.autoscaling.minReplicas }}
|
||||
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
|
||||
metrics:
|
||||
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: cpu
|
||||
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
- type: Resource
|
||||
resource:
|
||||
name: memory
|
||||
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,62 @@
|
|||
{{- if .Values.ingress.enabled -}}
|
||||
{{- $fullName := include "videostorage.fullname" . -}}
|
||||
{{- $svcPort := .Values.service.ports.http -}}
|
||||
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
|
||||
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
|
||||
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ $fullName }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "videostorage.labels" . | nindent 4 }}
|
||||
{{- with .Values.ingress.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||
ingressClassName: {{ .Values.ingress.className }}
|
||||
{{- end }}
|
||||
{{- if .Values.ingress.tls }}
|
||||
tls:
|
||||
{{- range .Values.ingress.tls }}
|
||||
- hosts:
|
||||
{{- range .hosts }}
|
||||
- {{ . | quote }}
|
||||
{{- end }}
|
||||
secretName: {{ .secretName }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range .Values.ingress.hosts }}
|
||||
- host: {{ .host | quote }}
|
||||
http:
|
||||
paths:
|
||||
{{- range .paths }}
|
||||
- path: {{ .path }}
|
||||
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
|
||||
pathType: {{ .pathType }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||
service:
|
||||
name: {{ $fullName }}
|
||||
port:
|
||||
number: {{ $svcPort }}
|
||||
{{- else }}
|
||||
serviceName: {{ $fullName }}
|
||||
servicePort: {{ $svcPort }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,18 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "videostorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "videostorage.labels" . | nindent 4 }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
ports:
|
||||
{{- range $key, $val := .Values.service.ports }}
|
||||
- port: {{ $val }}
|
||||
targetPort: {{ $key }}
|
||||
protocol: TCP
|
||||
name: {{ $key }}
|
||||
{{- end}}
|
||||
selector:
|
||||
{{- include "videostorage.selectorLabels" . | nindent 4 }}
|
||||
|
|
@@ -0,0 +1,18 @@
|
|||
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "videostorage.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "videostorage.labels" . | nindent 4 }}
|
||||
{{- if .Values.serviceMonitor.additionalLabels }}
|
||||
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
{{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }}
|
||||
selector:
|
||||
matchLabels:
|
||||
{{- include "videostorage.selectorLabels" . | nindent 6 }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,13 @@
|
|||
{{- if .Values.serviceAccount.create -}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: {{ include "videostorage.serviceAccountName" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
{{- include "videostorage.labels" . | nindent 4 }}
|
||||
{{- with .Values.serviceAccount.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
|
|
@@ -0,0 +1,15 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: "{{ include "videostorage.fullname" . }}-test-connection"
|
||||
labels:
|
||||
{{- include "videostorage.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
"helm.sh/hook": test
|
||||
spec:
|
||||
containers:
|
||||
- name: wget
|
||||
image: busybox
|
||||
command: ['wget']
|
||||
args: ['{{ include "videostorage.fullname" . }}:{{ .Values.service.port }}']
|
||||
restartPolicy: Never
|
||||
scripts/helmcharts/openreplay/charts/videostorage/values.yaml
|
|
@@ -0,0 +1,124 @@
|
|||
# Default values for openreplay.
|
||||
# This is a YAML-formatted file.
|
||||
# Declare variables to be passed into your templates.
|
||||
|
||||
replicaCount: 1
|
||||
|
||||
image:
|
||||
repository: "{{ .Values.global.openReplayContainerRegistry }}/videostorage"
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
|
||||
imagePullSecrets: []
|
||||
nameOverride: "videostorage"
|
||||
fullnameOverride: "videostorage-openreplay"
|
||||
|
||||
serviceAccount:
|
||||
# Specifies whether a service account should be created
|
||||
create: true
|
||||
# Annotations to add to the service account
|
||||
annotations: {}
|
||||
# The name of the service account to use.
|
||||
# If not set and create is true, a name is generated using the fullname template
|
||||
name: ""
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
securityContext:
|
||||
runAsUser: 1001
|
||||
runAsGroup: 1001
|
||||
podSecurityContext:
|
||||
runAsUser: 1001
|
||||
runAsGroup: 1001
|
||||
fsGroup: 1001
|
||||
fsGroupChangePolicy: "OnRootMismatch"
|
||||
|
||||
#securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
|
||||
service:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
http: 9000
|
||||
metrics: 8888
|
||||
|
||||
serviceMonitor:
|
||||
enabled: true
|
||||
additionalLabels:
|
||||
release: observability
|
||||
scrapeConfigs:
|
||||
- port: metrics
|
||||
honorLabels: true
|
||||
interval: 15s
|
||||
path: /metrics
|
||||
scheme: http
|
||||
scrapeTimeout: 10s
|
||||
|
||||
ingress:
|
||||
enabled: false
|
||||
className: ""
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
hosts:
|
||||
- host: chart-example.local
|
||||
paths:
|
||||
- path: /
|
||||
pathType: ImplementationSpecific
|
||||
tls: []
|
||||
# - secretName: chart-example-tls
|
||||
# hosts:
|
||||
# - chart-example.local
|
||||
|
||||
resources: {}
|
||||
# We usually recommend not to specify default resources and to leave this as a conscious
|
||||
# choice for the user. This also increases chances charts run on environments with little
|
||||
# resources, such as Minikube. If you do want to specify resources, uncomment the following
|
||||
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
|
||||
# limits:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
# requests:
|
||||
# cpu: 100m
|
||||
# memory: 128Mi
|
||||
|
||||
autoscaling:
|
||||
enabled: false
|
||||
minReplicas: 1
|
||||
maxReplicas: 5
|
||||
targetCPUUtilizationPercentage: 80
|
||||
# targetMemoryUtilizationPercentage: 80
|
||||
|
||||
env:
|
||||
FS_CLEAN_HRS: 24
|
||||
|
||||
pvc:
|
||||
# This can be either persistentVolumeClaim or hostPath.
|
||||
# In case of pvc, you'll have to provide the pvc name.
|
||||
# For example
|
||||
# name: openreplay-efs
|
||||
name: "{{ .Values.global.pvcRWXName }}"
|
||||
hostMountPath: /openreplay/storage/nfs
|
||||
|
||||
persistence: {}
|
||||
# # Spec of spec.template.spec.containers[*].volumeMounts
|
||||
# mounts:
|
||||
# - name: kafka-ssl
|
||||
# mountPath: /opt/kafka/ssl
|
||||
# # Spec of spec.template.spec.volumes
|
||||
# volumes:
|
||||
# - name: kafka-ssl
|
||||
# secret:
|
||||
# secretName: kafka-ssl
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||