refactor(backend-heuristics): bring all sub-builders to a common interface
This commit is contained in:
parent
432c0da4e2
commit
62b36bd70a
9 changed files with 269 additions and 270 deletions
@@ -1,41 +1,34 @@
package builder

import (
    "net/url"
    "strings"
    "time"

    "openreplay/backend/pkg/intervals"
    . "openreplay/backend/pkg/messages"
)

type messageProcessor interface {
    Handle(message Message, messageID uint64, timestamp uint64) Message
    Build() Message
}

type builder struct {
    readyMsgs              []Message
    timestamp              uint64
    lastProcessedTimestamp int64
    ptaBuilder             *performanceTrackAggrBuilder
    ciFinder               *cpuIssueFinder
    miFinder               *memoryIssueFinder
    ddDetector             *domDropDetector
    crDetector             *clickRageDetector
    dcDetector             *deadClickDetector
    readyMsgs              []Message
    timestamp              uint64
    processors             []messageProcessor
}

func NewBuilder() *builder {
    return &builder{
        ptaBuilder: &performanceTrackAggrBuilder{},
        ciFinder:   &cpuIssueFinder{},
        miFinder:   &memoryIssueFinder{},
        ddDetector: &domDropDetector{},
        crDetector: &clickRageDetector{},
        dcDetector: &deadClickDetector{},
        processors: []messageProcessor{
            &performanceTrackAggrBuilder{},
            &cpuIssueFinder{},
            &memoryIssueFinder{},
            // &domDropDetector{},
            &clickRageDetector{},
            &deadClickDetector{},
        },
    }
}

func (b *builder) appendReadyMessage(msg Message) { // interface is never nil even if it holds nil value
    b.readyMsgs = append(b.readyMsgs, msg)
}

func (b *builder) iterateReadyMessage(iter func(msg Message)) {
    for _, readyMsg := range b.readyMsgs {
        iter(readyMsg)
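The `messageProcessor` interface above is the "common interface" from the commit title: a sub-builder only has to expose `Handle` and `Build`, and is wired in by appending it to the `processors` slice in `NewBuilder`. As a rough sketch of what a custom processor could look like (the `longGapDetector` name, the 5-second threshold and the "long_gap" issue type are invented for illustration and are not part of this commit):

package builder

import . "openreplay/backend/pkg/messages"

// longGapDetector is a hypothetical sub-builder: it reports an issue when two
// consecutive PerformanceTrack messages are more than 5 seconds apart.
type longGapDetector struct {
    lastTimestamp uint64
    gapStart      uint64
    gapMessageID  uint64
}

func (d *longGapDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
    if _, ok := message.(*PerformanceTrack); !ok {
        return nil
    }
    if d.lastTimestamp != 0 && timestamp-d.lastTimestamp > 5000 { // assumed threshold
        d.gapStart = d.lastTimestamp
        d.gapMessageID = messageID
    }
    d.lastTimestamp = timestamp
    return d.Build()
}

func (d *longGapDetector) Build() Message {
    if d.gapStart == 0 {
        return nil // nothing to report; a plain nil, not a typed nil
    }
    i := &IssueEvent{
        Type:      "long_gap", // hypothetical issue type
        Timestamp: d.gapStart,
        MessageID: d.gapMessageID,
    }
    d.gapStart, d.gapMessageID = 0, 0
    return i
}

Registering such a type would just mean adding `&longGapDetector{}` to the `processors` slice.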
@@ -48,65 +41,38 @@ func (b *builder) handleMessage(message Message, messageID uint64) {
    if b.timestamp < timestamp {
        b.timestamp = timestamp
    }

    b.lastProcessedTimestamp = time.Now().UnixMilli()

    if b.timestamp == 0 {
        // in case of SessionStart. TODO: make timestamp system transparent
        return
    }
    switch msg := message.(type) {
    case *PerformanceTrack:
        if rm := b.ptaBuilder.HandlePerformanceTrack(msg, b.timestamp); rm != nil {
            b.appendReadyMessage(rm)
    for _, p := range b.processors {
        /* If nil is not returned explicitly by Handle, but as a typed nil
           ("var i *IssueEvent; return i;"),
           then `rm != nil` will be true.
           TODO: enforce nil to be nil(?) or add `isNil() bool` to the Message types,
           because this part is expected to be extendable by users with custom messageProcessor's.
           Use of reflection would probably be too slow on millions of messages.
        */
        if rm := p.Handle(message, messageID, b.timestamp); rm != nil {
            b.readyMsgs = append(b.readyMsgs, rm)
        }
        if rm := b.ciFinder.HandlePerformanceTrack(msg, messageID, b.timestamp); rm != nil {
            b.appendReadyMessage(rm)
        }
        if rm := b.miFinder.HandlePerformanceTrack(msg, messageID, b.timestamp); rm != nil {
            b.appendReadyMessage(rm)
        }
    case *CreateElementNode,
        *CreateTextNode:
        b.ddDetector.HandleNodeCreation()
    case *RemoveNode:
        b.ddDetector.HandleNodeRemoval(b.timestamp)
    case *CreateDocument:
        if rm := b.ddDetector.Build(); rm != nil {
            b.appendReadyMessage(rm)
        }
    }
    if rm := b.dcDetector.HandleMessage(message, messageID, b.timestamp); rm != nil {
        b.appendReadyMessage(rm)
    }
}

func (b *builder) checkTimeouts(ts int64) bool {
    if b.timestamp == 0 {
        return false // There were no timestamp events yet
    }

    if b.ptaBuilder.HasInstance() && int64(b.ptaBuilder.GetStartTimestamp())+intervals.EVENTS_PERFORMANCE_AGGREGATION_TIMEOUT < ts {
        if msg := b.ptaBuilder.Build(); msg != nil {
            b.appendReadyMessage(msg)
        }
        return false // SessionStart happened only
    }

    lastTsGap := ts - int64(b.timestamp)
    // Maybe listen for `trigger` and react on SessionEnd instead (less reliable)
    if lastTsGap > intervals.EVENTS_SESSION_END_TIMEOUT {
        if rm := b.ddDetector.Build(); rm != nil {
            b.appendReadyMessage(rm)
        }
        if rm := b.ciFinder.Build(); rm != nil {
            b.appendReadyMessage(rm)
        }
        if rm := b.miFinder.Build(); rm != nil {
            b.appendReadyMessage(rm)
        }
        if rm := b.crDetector.Build(); rm != nil {
            b.appendReadyMessage(rm)
        }
        if rm := b.dcDetector.HandleReaction(b.timestamp); rm != nil {
            b.appendReadyMessage(rm)
        for _, p := range b.processors {
            // TODO: same as above
            if rm := p.Build(); rm != nil {
                b.readyMsgs = append(b.readyMsgs, rm)
            }
        }
        return true
    }
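The typed-nil caveat in the comment above is worth spelling out: an interface value that wraps a nil pointer is itself non-nil, so `rm != nil` passes even though the processor meant "nothing to report". A standalone sketch (the `Message` and `IssueEvent` here are simplified stand-ins, not the real types) that demonstrates it:

package main

import "fmt"

type Message interface{} // stand-in for the real Message interface

type IssueEvent struct{}

// typedNil returns a nil *IssueEvent wrapped in the interface: NOT a nil interface.
func typedNil() Message {
    var i *IssueEvent
    return i
}

// plainNil returns the nil interface value itself.
func plainNil() Message {
    return nil
}

func main() {
    fmt.Println(typedNil() == nil) // false — the interface holds (*IssueEvent)(nil)
    fmt.Println(plainNil() == nil) // true
}

Hence the convention the comment asks for: processors should `return nil` explicitly rather than returning a nil pointer variable.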
@@ -15,8 +15,6 @@ func (m builderMap) GetBuilder(sessionID uint64) *builder {
    if b == nil {
        b = NewBuilder()
        m[sessionID] = b
        b.sid = sessionID
    }
    return b
}
@@ -6,7 +6,7 @@ import (
    . "openreplay/backend/pkg/messages"
)

const CLICK_TIME_DIFF = 300
const MAX_TIME_DIFF = 300
const MIN_CLICKS_IN_A_ROW = 3

type clickRageDetector struct {
@@ -17,39 +17,50 @@ type clickRageDetector struct {
    countsInARow int
}

func (crd *clickRageDetector) Build() *IssueEvent {
    var i *IssueEvent
    if crd.countsInARow >= MIN_CLICKS_IN_A_ROW {
        payload, _ := json.Marshal(struct{ Count int }{crd.countsInARow})
        i = &IssueEvent{
            Type:          "click_rage",
            ContextString: crd.lastLabel,
            Payload:       string(payload), // TODO: json encoder
            Timestamp:     crd.firstInARawTimestamp,
            MessageID:     crd.firstInARawMessageId,
        }
    }
func (crd *clickRageDetector) reset() {
    crd.lastTimestamp = 0
    crd.lastLabel = ""
    crd.firstInARawTimestamp = 0
    crd.firstInARawMessageId = 0
    crd.countsInARow = 0
    return i
}

func (crd *clickRageDetector) HandleMouseClick(msg *MouseClick, messageID uint64, timestamp uint64) *IssueEvent {
    if crd.lastTimestamp+CLICK_TIME_DIFF > timestamp && crd.lastLabel == msg.Label {
        crd.lastTimestamp = timestamp
        crd.countsInARow += 1
        return nil
func (crd *clickRageDetector) Build() Message {
    if crd.countsInARow >= MIN_CLICKS_IN_A_ROW {
        payload, _ := json.Marshal(struct{ Count int }{crd.countsInARow})
        i := &IssueEvent{
            Type:          "click_rage",
            ContextString: crd.lastLabel,
            Payload:       string(payload), // TODO: json message field type
            Timestamp:     crd.firstInARawTimestamp,
            MessageID:     crd.firstInARawMessageId,
        }
        crd.reset()
        return i
    }
    i := crd.Build()
    if msg.Label != "" {
    crd.reset()
    return nil
    }

func (crd *clickRageDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
    switch msg := message.(type) {
    case *MouseClick:
        // TODO: check whether it is ok to capture click rages without the connected ClickEvent in the db.
        if msg.Label == "" {
            return crd.Build()
        }
        if crd.lastLabel == msg.Label && timestamp-crd.lastTimestamp < MAX_TIME_DIFF {
            crd.lastTimestamp = timestamp
            crd.countsInARow += 1
            return nil
        }
        i := crd.Build()
        crd.lastTimestamp = timestamp
        crd.lastLabel = msg.Label
        crd.firstInARawTimestamp = timestamp
        crd.firstInARawMessageId = messageID
        crd.countsInARow = 1
        return i
    }
    return i
    return nil
}
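Taken together, the new `Handle`/`Build` pair emits a "click_rage" issue once at least MIN_CLICKS_IN_A_ROW clicks land on the same label, each within MAX_TIME_DIFF of the previous one, and flushes it when the streak breaks. A rough behavioural sketch, written as a test inside this package (labels, timestamps and message IDs are made up):

package builder

import (
    "testing"

    . "openreplay/backend/pkg/messages"
)

func TestClickRageDetectorSketch(t *testing.T) {
    crd := &clickRageDetector{}
    var rm Message
    // Three fast clicks on the same label: the streak grows, nothing is emitted yet.
    for _, ts := range []uint64{1000, 1100, 1200} {
        rm = crd.Handle(&MouseClick{Label: "Save"}, 1, ts)
    }
    // A click on a different label breaks the streak and flushes the issue.
    rm = crd.Handle(&MouseClick{Label: "Cancel"}, 2, 2000)
    ie, ok := rm.(*IssueEvent)
    if !ok || ie.Type != "click_rage" || ie.Timestamp != 1000 || ie.ContextString != "Save" {
        t.Fatalf("expected a click_rage issue starting at the first click, got %#v", rm)
    }
}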
@@ -18,7 +18,7 @@ type cpuIssueFinder struct {
    contextString string
}

func (f *cpuIssueFinder) Build() *IssueEvent {
func (f *cpuIssueFinder) Build() Message {
    if f.startTimestamp == 0 {
        return nil
    }
@@ -47,35 +47,35 @@ func (f *cpuIssueFinder) Build() *IssueEvent {
    }
}

func (f *cpuIssueFinder) HandleSetPageLocation(msg *SetPageLocation) {
    f.contextString = msg.URL
}

func (f *cpuIssueFinder) HandlePerformanceTrack(msg *PerformanceTrack, messageID uint64, timestamp uint64) *IssueEvent {
    dt := performance.TimeDiff(timestamp, f.lastTimestamp)
    if dt == 0 {
        return nil // TODO: handle error
    }

    f.lastTimestamp = timestamp

    if msg.Frames == -1 || msg.Ticks == -1 {
        return f.Build()
    }

    cpuRate := performance.CPURate(msg.Ticks, dt)

    if cpuRate >= CPU_THRESHOLD {
        if f.startTimestamp == 0 {
            f.startTimestamp = timestamp
            f.startMessageID = messageID
func (f *cpuIssueFinder) Handle(message Message, messageID uint64, timestamp uint64) Message {
    switch msg := message.(type) {
    case *PerformanceTrack:
        dt := performance.TimeDiff(timestamp, f.lastTimestamp)
        if dt == 0 {
            return nil // TODO: handle error
        }
        if f.maxRate < cpuRate {
            f.maxRate = cpuRate
        }
    } else {
        return f.Build()
    }

        f.lastTimestamp = timestamp

        if msg.Frames == -1 || msg.Ticks == -1 {
            return f.Build()
        }

        cpuRate := performance.CPURate(msg.Ticks, dt)

        if cpuRate >= CPU_THRESHOLD {
            if f.startTimestamp == 0 {
                f.startTimestamp = timestamp
                f.startMessageID = messageID
            }
            if f.maxRate < cpuRate {
                f.maxRate = cpuRate
            }
        } else {
            return f.Build()
        }
    case *SetPageLocation:
        f.contextString = msg.URL
    }
    return nil
}
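Both cpuIssueFinder and memoryIssueFinder follow the same windowing pattern: `startTimestamp == 0` means no issue window is open; while the observed rate stays at or above the threshold the window is opened or extended and the peak is tracked; once the rate drops, `Build()` emits the issue and resets. A standalone, simplified sketch of that pattern (names and values invented for illustration):

package main

import "fmt"

// window mirrors the shape of the cpu/memory issue finders, stripped of the
// OpenReplay message types.
type window struct {
    start   uint64 // 0 means "no issue window open"
    maxRate int
}

// observe opens or extends the window while rate >= threshold; when the rate
// drops below it, the window is closed and its peak is reported.
func (w *window) observe(rate, threshold int, ts uint64) (closed bool, peak int) {
    if rate >= threshold {
        if w.start == 0 {
            w.start = ts
        }
        if w.maxRate < rate {
            w.maxRate = rate
        }
        return false, 0
    }
    if w.start == 0 {
        return false, 0
    }
    peak = w.maxRate
    w.start, w.maxRate = 0, 0
    return true, peak
}

func main() {
    w := &window{}
    samples := []struct {
        ts   uint64
        rate int
    }{{1000, 95}, {2000, 99}, {3000, 40}}
    for _, s := range samples {
        if closed, peak := w.observe(s.rate, 90, s.ts); closed {
            fmt.Println("issue window closed, peak rate:", peak) // peak rate: 99
        }
    }
}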
@@ -7,50 +7,61 @@ import (
const CLICK_RELATION_TIME = 1400

type deadClickDetector struct {
    lastMouseClick *MouseClick
    lastTimestamp  uint64
    lastMessageID  uint64
    inputIDSet     map[uint64]bool
    lastTimestamp      uint64
    lastMouseClick     *MouseClick
    lastClickTimestamp uint64
    lastMessageID      uint64
    inputIDSet         map[uint64]bool
}

func (d *deadClickDetector) HandleReaction(timestamp uint64) *IssueEvent {
    var i *IssueEvent
    if d.lastMouseClick != nil && d.lastTimestamp+CLICK_RELATION_TIME < timestamp {
        i = &IssueEvent{
            Type:          "dead_click",
            ContextString: d.lastMouseClick.Label,
            Timestamp:     d.lastTimestamp,
            MessageID:     d.lastMessageID,
        }
    }
func (d *deadClickDetector) reset() {
    d.inputIDSet = nil
    d.lastMouseClick = nil
    d.lastTimestamp = 0
    d.lastClickTimestamp = 0
    d.lastMessageID = 0
}

func (d *deadClickDetector) handleReaction(timestamp uint64) Message {
    if d.lastMouseClick == nil || d.lastClickTimestamp+CLICK_RELATION_TIME > timestamp { // reaction is instant
        d.reset()
        return nil
    }
    i := &IssueEvent{
        Type:          "dead_click",
        ContextString: d.lastMouseClick.Label,
        Timestamp:     d.lastClickTimestamp,
        MessageID:     d.lastMessageID,
    }
    d.reset()
    return i
}

func (d *deadClickDetector) HandleMessage(msg Message, messageID uint64, timestamp uint64) *IssueEvent {
    var i *IssueEvent
    switch m := msg.(type) {
func (d *deadClickDetector) Build() Message {
    return d.handleReaction(d.lastTimestamp)
}

func (d *deadClickDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
    d.lastTimestamp = timestamp
    switch msg := message.(type) {
    case *SetInputTarget:
        if d.inputIDSet == nil {
            d.inputIDSet = make(map[uint64]bool)
        }
        d.inputIDSet[m.ID] = true
        d.inputIDSet[msg.ID] = true
    case *CreateDocument:
        d.inputIDSet = nil
    case *MouseClick:
        if m.Label == "" {
        if msg.Label == "" {
            return nil
        }
        i = d.HandleReaction(timestamp)
        if d.inputIDSet[m.ID] { // ignore if input
        i := d.handleReaction(timestamp)
        if d.inputIDSet[msg.ID] { // ignore if input
            return i
        }
        d.lastMouseClick = m
        d.lastTimestamp = timestamp
        d.lastMouseClick = msg
        d.lastClickTimestamp = timestamp
        d.lastMessageID = messageID
        return i
    case *SetNodeAttribute,
        *RemoveNodeAttribute,
        *CreateElementNode,
@@ -60,7 +71,7 @@ func (d *deadClickDetector) HandleMessage(msg Message, messageID uint64, timesta
        *SetCSSData,
        *CSSInsertRule,
        *CSSDeleteRule:
        i = d.HandleReaction(timestamp)
        return d.handleReaction(timestamp)
    }
    return i
    return nil
}
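The renamed fields make the dead-click rule easier to read: a click is remembered in lastClickTimestamp, and if the next DOM/CSS reaction arrives more than CLICK_RELATION_TIME (1400 ms) later, the click is reported as dead; a reaction within the window just resets the state. A rough test-style sketch inside this package (IDs and timestamps are made up):

package builder

import (
    "testing"

    . "openreplay/backend/pkg/messages"
)

func TestDeadClickDetectorSketch(t *testing.T) {
    d := &deadClickDetector{}
    // A click on a non-input element is remembered.
    if rm := d.Handle(&MouseClick{ID: 7, Label: "Submit"}, 1, 1000); rm != nil {
        t.Fatalf("no issue expected right after the click, got %#v", rm)
    }
    // The first DOM reaction arrives 2000 ms later, well past CLICK_RELATION_TIME.
    rm := d.Handle(&SetNodeAttribute{}, 2, 3000)
    ie, ok := rm.(*IssueEvent)
    if !ok || ie.Type != "dead_click" || ie.Timestamp != 1000 {
        t.Fatalf("expected a dead_click issue for the click at 1000, got %#v", rm)
    }
}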
@@ -4,36 +4,45 @@ import (
    . "openreplay/backend/pkg/messages"
)

const DROP_WINDOW = 200 // ms
const CRITICAL_COUNT = 1 // Our login page contains 20. But on crash it removes only roots (1-3 nodes).
// TODO: smart detection (making the whole DOM tree would eat all memory)

type domDropDetector struct {
    removedCount      int
    lastDropTimestamp uint64
}

const DROP_WINDOW = 200 // ms
const CRITICAL_COUNT = 1 // Our login page contains 20. But on crash it removes only roots (1-3 nodes).

func (dd *domDropDetector) HandleNodeCreation() {
func (dd *domDropDetector) reset() {
    dd.removedCount = 0
    dd.lastDropTimestamp = 0
}

func (dd *domDropDetector) HandleNodeRemoval(ts uint64) {
    if dd.lastDropTimestamp+DROP_WINDOW > ts {
        dd.removedCount += 1
    } else {
        dd.removedCount = 1
func (dd *domDropDetector) Handle(message Message, _ uint64, timestamp uint64) Message {
    switch message.(type) {
    case *CreateElementNode,
        *CreateTextNode:
        dd.removedCount = 0
        dd.lastDropTimestamp = 0
    case *RemoveNode:
        if dd.lastDropTimestamp+DROP_WINDOW > timestamp {
            dd.removedCount += 1
        } else {
            dd.removedCount = 1
        }
        dd.lastDropTimestamp = timestamp
    }
    dd.lastDropTimestamp = ts
    return nil
}

func (dd *domDropDetector) Build() *DOMDrop {
    var domDrop *DOMDrop
func (dd *domDropDetector) Build() Message {
    if dd.removedCount >= CRITICAL_COUNT {
        domDrop = &DOMDrop{
        domDrop := &DOMDrop{
            Timestamp: dd.lastDropTimestamp,
        }
        dd.reset()
        return domDrop
    }
    dd.removedCount = 0
    dd.lastDropTimestamp = 0
    return domDrop
    dd.reset()
    return nil
}
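With the common interface, the domDropDetector's window logic reads as: removals within DROP_WINDOW (200 ms) of the previous removal accumulate, anything later restarts the count, and any node creation clears it; `Build()` then reports a DOMDrop once the count reaches CRITICAL_COUNT (note the processor is still commented out in NewBuilder). A rough sketch of the counting behaviour, as a test in this package (timestamps are made up):

package builder

import (
    "testing"

    . "openreplay/backend/pkg/messages"
)

func TestDomDropDetectorSketch(t *testing.T) {
    dd := &domDropDetector{}
    dd.Handle(&RemoveNode{}, 0, 1000) // removedCount = 1
    dd.Handle(&RemoveNode{}, 0, 1100) // within DROP_WINDOW of 1000 → removedCount = 2
    dd.Handle(&RemoveNode{}, 0, 2000) // outside the window → count restarts at 1
    drop, ok := dd.Build().(*DOMDrop)
    if !ok || drop.Timestamp != 2000 { // 1 >= CRITICAL_COUNT, so the last drop is reported
        t.Fatalf("expected a DOMDrop at 2000, got %#v", drop)
    }
}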
@@ -19,7 +19,7 @@ type memoryIssueFinder struct {
    contextString string
}

func (f *memoryIssueFinder) Build() *IssueEvent {
func (f *memoryIssueFinder) Build() Message {
    if f.startTimestamp == 0 {
        return nil
    }
@@ -37,34 +37,34 @@ func (f *memoryIssueFinder) Build() *IssueEvent {
    return i
}

func (f *memoryIssueFinder) HandleSetPageLocation(msg *SetPageLocation) {
    f.contextString = msg.URL
}
func (f *memoryIssueFinder) Handle(message Message, messageID uint64, timestamp uint64) Message {
    switch msg := message.(type) {
    case *PerformanceTrack:
        if f.count < MIN_COUNT {
            f.sum += float64(msg.UsedJSHeapSize)
            f.count++
            return nil
        }

        average := f.sum / f.count
        rate := int(math.Round(float64(msg.UsedJSHeapSize) / average * 100))

func (f *memoryIssueFinder) HandlePerformanceTrack(msg *PerformanceTrack, messageID uint64, timestamp uint64) *IssueEvent {
    if f.count < MIN_COUNT {
        f.sum += float64(msg.UsedJSHeapSize)
        f.count++
        return nil
    }

    average := f.sum / f.count
    rate := int(math.Round(float64(msg.UsedJSHeapSize) / average * 100))

        f.sum += float64(msg.UsedJSHeapSize)
        f.count++

    if rate >= MEM_RATE_THRESHOLD {
        if f.startTimestamp == 0 {
            f.startTimestamp = timestamp
            f.startMessageID = messageID
        if rate >= MEM_RATE_THRESHOLD {
            if f.startTimestamp == 0 {
                f.startTimestamp = timestamp
                f.startMessageID = messageID
            }
            if f.rate < rate {
                f.rate = rate
            }
        } else {
            return f.Build()
        }
        if f.rate < rate {
            f.rate = rate
        }
    } else {
        return f.Build()
    case *SetPageLocation:
        f.contextString = msg.URL
    }

    return nil
}
@@ -7,100 +7,105 @@ import (
    "openreplay/backend/pkg/messages/performance"
)

const AGGREGATION_WINDOW = 2 * 60 * 1000

type performanceTrackAggrBuilder struct {
    performanceTrackAggr *PerformanceTrackAggr
    lastTimestamp        uint64
    count                float64
    sumFrameRate         float64
    sumTickRate          float64
    sumTotalJSHeapSize   float64
    sumUsedJSHeapSize    float64
    *PerformanceTrackAggr
    lastTimestamp      uint64
    count              float64
    sumFrameRate       float64
    sumTickRate        float64
    sumTotalJSHeapSize float64
    sumUsedJSHeapSize  float64
}

func (b *performanceTrackAggrBuilder) start(timestamp uint64) {
    b.performanceTrackAggr = &PerformanceTrackAggr{
    b.PerformanceTrackAggr = &PerformanceTrackAggr{
        TimestampStart: timestamp,
    }
    b.lastTimestamp = timestamp
}

func (b *performanceTrackAggrBuilder) HandlePerformanceTrack(msg *PerformanceTrack, timestamp uint64) *PerformanceTrackAggr {
    if msg.Frames == -1 || msg.Ticks == -1 || !b.HasInstance() {
        performanceTrackAggr := b.Build()
        b.start(timestamp)
        return performanceTrackAggr
    }

    dt := performance.TimeDiff(timestamp, b.lastTimestamp)
    if dt == 0 {
        return nil // TODO: handle error
    }

    frameRate := performance.FrameRate(msg.Frames, dt)
    tickRate := performance.TickRate(msg.Ticks, dt)

    fps := uint64(math.Round(frameRate))
    cpu := performance.CPURateFromTickRate(tickRate)
    if fps < b.performanceTrackAggr.MinFPS || b.performanceTrackAggr.MinFPS == 0 {
        b.performanceTrackAggr.MinFPS = fps
    }
    if fps > b.performanceTrackAggr.MaxFPS {
        b.performanceTrackAggr.MaxFPS = fps
    }
    if cpu < b.performanceTrackAggr.MinCPU || b.performanceTrackAggr.MinCPU == 0 {
        b.performanceTrackAggr.MinCPU = cpu
    }
    if cpu > b.performanceTrackAggr.MaxCPU {
        b.performanceTrackAggr.MaxCPU = cpu
    }
    if msg.TotalJSHeapSize < b.performanceTrackAggr.MinTotalJSHeapSize || b.performanceTrackAggr.MinTotalJSHeapSize == 0 {
        b.performanceTrackAggr.MinTotalJSHeapSize = msg.TotalJSHeapSize
    }
    if msg.TotalJSHeapSize > b.performanceTrackAggr.MaxTotalJSHeapSize {
        b.performanceTrackAggr.MaxTotalJSHeapSize = msg.TotalJSHeapSize
    }
    if msg.UsedJSHeapSize < b.performanceTrackAggr.MinUsedJSHeapSize || b.performanceTrackAggr.MinUsedJSHeapSize == 0 {
        b.performanceTrackAggr.MinUsedJSHeapSize = msg.UsedJSHeapSize
    }
    if msg.UsedJSHeapSize > b.performanceTrackAggr.MaxUsedJSHeapSize {
        b.performanceTrackAggr.MaxUsedJSHeapSize = msg.UsedJSHeapSize
    }
    b.sumFrameRate += frameRate
    b.sumTickRate += tickRate
    b.sumTotalJSHeapSize += float64(msg.TotalJSHeapSize)
    b.sumUsedJSHeapSize += float64(msg.UsedJSHeapSize)
    b.count += 1
    b.lastTimestamp = timestamp
    return nil
}

func (b *performanceTrackAggrBuilder) HasInstance() bool {
    return b.performanceTrackAggr != nil
}

func (b *performanceTrackAggrBuilder) GetStartTimestamp() uint64 {
    if b.performanceTrackAggr == nil {
        return 0
    }
    return b.performanceTrackAggr.TimestampStart
}

func (b *performanceTrackAggrBuilder) Build() *PerformanceTrackAggr {
    var performanceTrackAggr *PerformanceTrackAggr
    if b.HasInstance() && b.GetStartTimestamp() != b.lastTimestamp && b.count != 0 {
        performanceTrackAggr = b.performanceTrackAggr
        performanceTrackAggr.TimestampEnd = b.lastTimestamp
        performanceTrackAggr.AvgFPS = uint64(math.Round(b.sumFrameRate / b.count))
        performanceTrackAggr.AvgCPU = 100 - uint64(math.Round(b.sumTickRate*100/b.count))
        performanceTrackAggr.AvgTotalJSHeapSize = uint64(math.Round(b.sumTotalJSHeapSize / b.count))
        performanceTrackAggr.AvgUsedJSHeapSize = uint64(math.Round(b.sumUsedJSHeapSize / b.count))
    }
    b.performanceTrackAggr = nil
func (b *performanceTrackAggrBuilder) reset() {
    b.PerformanceTrackAggr = nil
    b.count = 0
    b.sumFrameRate = 0
    b.sumTickRate = 0
    b.sumTotalJSHeapSize = 0
    b.sumUsedJSHeapSize = 0
    b.lastTimestamp = 0
    return performanceTrackAggr
}

func (b *performanceTrackAggrBuilder) Handle(message Message, _ uint64, timestamp uint64) Message {
    switch msg := message.(type) {
    case *PerformanceTrack:
        if b.PerformanceTrackAggr == nil || msg.Frames == -1 || msg.Ticks == -1 {
            pta := b.Build()
            b.start(timestamp)
            return pta
        }

        dt := performance.TimeDiff(timestamp, b.lastTimestamp)
        if dt == 0 {
            return nil // shouldn't happen
        }

        frameRate := performance.FrameRate(msg.Frames, dt)
        tickRate := performance.TickRate(msg.Ticks, dt)

        fps := uint64(math.Round(frameRate))
        cpu := performance.CPURateFromTickRate(tickRate)
        if fps < b.MinFPS || b.MinFPS == 0 {
            b.MinFPS = fps
        }
        if fps > b.MaxFPS {
            b.MaxFPS = fps
        }
        if cpu < b.MinCPU || b.MinCPU == 0 {
            b.MinCPU = cpu
        }
        if cpu > b.MaxCPU {
            b.MaxCPU = cpu
        }
        if msg.TotalJSHeapSize < b.MinTotalJSHeapSize || b.MinTotalJSHeapSize == 0 {
            b.MinTotalJSHeapSize = msg.TotalJSHeapSize
        }
        if msg.TotalJSHeapSize > b.MaxTotalJSHeapSize {
            b.MaxTotalJSHeapSize = msg.TotalJSHeapSize
        }
        if msg.UsedJSHeapSize < b.MinUsedJSHeapSize || b.MinUsedJSHeapSize == 0 {
            b.MinUsedJSHeapSize = msg.UsedJSHeapSize
        }
        if msg.UsedJSHeapSize > b.MaxUsedJSHeapSize {
            b.MaxUsedJSHeapSize = msg.UsedJSHeapSize
        }
        b.sumFrameRate += frameRate
        b.sumTickRate += tickRate
        b.sumTotalJSHeapSize += float64(msg.TotalJSHeapSize)
        b.sumUsedJSHeapSize += float64(msg.UsedJSHeapSize)
        b.count += 1
        b.lastTimestamp = timestamp
    }
    if b.PerformanceTrackAggr != nil &&
        timestamp-b.PerformanceTrackAggr.TimestampStart >= AGGREGATION_WINDOW {
        return b.Build()
    }
    return nil
}

func (b *performanceTrackAggrBuilder) Build() Message {
    if b.PerformanceTrackAggr == nil {
        return nil
    }
    if b.count != 0 && b.PerformanceTrackAggr.TimestampStart < b.lastTimestamp { // the last one shouldn't happen
        b.PerformanceTrackAggr.TimestampEnd = b.lastTimestamp
        b.PerformanceTrackAggr.AvgFPS = uint64(math.Round(b.sumFrameRate / b.count))
        b.PerformanceTrackAggr.AvgCPU = 100 - uint64(math.Round(b.sumTickRate*100/b.count))
        b.PerformanceTrackAggr.AvgTotalJSHeapSize = uint64(math.Round(b.sumTotalJSHeapSize / b.count))
        b.PerformanceTrackAggr.AvgUsedJSHeapSize = uint64(math.Round(b.sumUsedJSHeapSize / b.count))
        b.reset()
        return b.PerformanceTrackAggr
    }
    b.reset()
    return nil
}
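The struct change above swaps the named performanceTrackAggr field for an embedded *PerformanceTrackAggr, which is why the new Handle/Build code can write b.MinFPS, b.MaxCPU and so on: field promotion reaches through the embedded pointer. A standalone sketch of that mechanism (simplified stand-in types):

package main

import "fmt"

type PerformanceTrackAggr struct {
    MinFPS, MaxFPS uint64
}

type aggrBuilder struct {
    *PerformanceTrackAggr // embedded pointer: its fields are promoted
    count float64
}

func main() {
    b := &aggrBuilder{PerformanceTrackAggr: &PerformanceTrackAggr{}}
    b.MinFPS = 30 // equivalent to b.PerformanceTrackAggr.MinFPS = 30
    fmt.Println(b.PerformanceTrackAggr.MinFPS) // 30
}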
@@ -5,7 +5,6 @@ const HEARTBEAT_INTERVAL = 2 * 60 * 1000 // maximum
const INTEGRATIONS_REQUEST_INTERVAL = 1 * 60 * 1000 // integrations
const EVENTS_PAGE_EVENT_TIMEOUT = 2 * 60 * 1000 // page event timeout
const EVENTS_INPUT_EVENT_TIMEOUT = 2 * 60 * 1000 //
const EVENTS_PERFORMANCE_AGGREGATION_TIMEOUT = 2 * 60 * 1000
const EVENTS_SESSION_END_TIMEOUT = HEARTBEAT_INTERVAL + 30*1000
const EVENTS_SESSION_END_TIMEOUT_WITH_INTEGRATIONS = HEARTBEAT_INTERVAL + 3*60*1000
const EVENTS_BACK_COMMIT_GAP = EVENTS_SESSION_END_TIMEOUT_WITH_INTEGRATIONS + 1*60*1000 // for the back commit