From 5fea1fb785eb40b3e4f63da12b058c7ef513ca91 Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Wed, 30 Mar 2022 13:13:50 +0200 Subject: [PATCH 01/15] fix(ee/kafka): restart worker after consumer timeout error --- ee/backend/pkg/kafka/consumer.go | 83 +++++++++++++++----------------- 1 file changed, 40 insertions(+), 43 deletions(-) diff --git a/ee/backend/pkg/kafka/consumer.go b/ee/backend/pkg/kafka/consumer.go index 1483c2ccf..ca0544923 100644 --- a/ee/backend/pkg/kafka/consumer.go +++ b/ee/backend/pkg/kafka/consumer.go @@ -9,9 +9,9 @@ import ( "github.com/pkg/errors" + "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka" "openreplay/backend/pkg/env" "openreplay/backend/pkg/queue/types" - "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka" ) type Message = kafka.Message @@ -19,7 +19,7 @@ type Message = kafka.Message type Consumer struct { c *kafka.Consumer messageHandler types.MessageHandler - commitTicker *time.Ticker + commitTicker *time.Ticker pollTimeout uint lastKafkaEventTs int64 @@ -56,7 +56,7 @@ func NewConsumer(group string, topics []string, messageHandler types.MessageHand return &Consumer{ c: c, messageHandler: messageHandler, - commitTicker: time.NewTicker(2 * time.Minute), + commitTicker: time.NewTicker(2 * time.Minute), pollTimeout: 200, } } @@ -65,13 +65,12 @@ func (consumer *Consumer) DisableAutoCommit() { consumer.commitTicker.Stop() } - func (consumer *Consumer) Commit() error { consumer.c.Commit() // TODO: return error if it is not "No offset stored" return nil } -func (consumer *Consumer) CommitAtTimestamp(commitTs int64) error { +func (consumer *Consumer) CommitAtTimestamp(commitTs int64) error { assigned, err := consumer.c.Assignment() if err != nil { return err @@ -84,37 +83,38 @@ func (consumer *Consumer) CommitAtTimestamp(commitTs int64) error { timestamps = append(timestamps, p) } offsets, err := consumer.c.OffsetsForTimes(timestamps, 2000) - if err != nil { + if err != nil { return errors.Wrap(err, "Kafka Consumer back commit error") } // Limiting to already committed committed, err := consumer.c.Committed(assigned, 2000) // memorise? 
- logPartitions("Actually committed:",committed) + logPartitions("Actually committed:", committed) if err != nil { return errors.Wrap(err, "Kafka Consumer retrieving committed error") } for _, offs := range offsets { for _, comm := range committed { - if comm.Offset == kafka.OffsetStored || + if comm.Offset == kafka.OffsetStored || comm.Offset == kafka.OffsetInvalid || - comm.Offset == kafka.OffsetBeginning || - comm.Offset == kafka.OffsetEnd { continue } - if comm.Partition == offs.Partition && + comm.Offset == kafka.OffsetBeginning || + comm.Offset == kafka.OffsetEnd { + continue + } + if comm.Partition == offs.Partition && (comm.Topic != nil && offs.Topic != nil && *comm.Topic == *offs.Topic) && - comm.Offset > offs.Offset { + comm.Offset > offs.Offset { offs.Offset = comm.Offset } } } - // TODO: check per-partition errors: offsets[i].Error + // TODO: check per-partition errors: offsets[i].Error _, err = consumer.c.CommitOffsets(offsets) return errors.Wrap(err, "Kafka Consumer back commit error") } - -func (consumer *Consumer) CommitBack(gap int64) error { +func (consumer *Consumer) CommitBack(gap int64) error { if consumer.lastKafkaEventTs == 0 { return nil } @@ -135,31 +135,31 @@ func (consumer *Consumer) ConsumeNext() error { } switch e := ev.(type) { - case *kafka.Message: - if e.TopicPartition.Error != nil { - return errors.Wrap(e.TopicPartition.Error, "Consumer Partition Error") - } - ts := e.Timestamp.UnixNano()/ 1e6 - consumer.messageHandler(decodeKey(e.Key), e.Value, &types.Meta{ - Topic: *(e.TopicPartition.Topic), - ID: uint64(e.TopicPartition.Offset), - Timestamp: ts, - }) - consumer.lastKafkaEventTs = ts - // case kafka.AssignedPartitions: - // logPartitions("Kafka Consumer: Partitions Assigned", e.Partitions) - // consumer.partitions = e.Partitions - // consumer.c.Assign(e.Partitions) - // log.Printf("Actually partitions assigned!") - // case kafka.RevokedPartitions: - // log.Println("Kafka Cosumer: Partitions Revoked") - // consumer.partitions = nil - // consumer.c.Unassign() - case kafka.Error: - if e.Code() == kafka.ErrAllBrokersDown { - os.Exit(1) - } - log.Printf("Consumer error: %v\n", e) + case *kafka.Message: + if e.TopicPartition.Error != nil { + return errors.Wrap(e.TopicPartition.Error, "Consumer Partition Error") + } + ts := e.Timestamp.UnixNano() / 1e6 + consumer.messageHandler(decodeKey(e.Key), e.Value, &types.Meta{ + Topic: *(e.TopicPartition.Topic), + ID: uint64(e.TopicPartition.Offset), + Timestamp: ts, + }) + consumer.lastKafkaEventTs = ts + // case kafka.AssignedPartitions: + // logPartitions("Kafka Consumer: Partitions Assigned", e.Partitions) + // consumer.partitions = e.Partitions + // consumer.c.Assign(e.Partitions) + // log.Printf("Actually partitions assigned!") + // case kafka.RevokedPartitions: + // log.Println("Kafka Cosumer: Partitions Revoked") + // consumer.partitions = nil + // consumer.c.Unassign() + case kafka.Error: + if e.Code() == kafka.ErrAllBrokersDown || e.Code() == kafka.ErrMaxPollExceeded { + os.Exit(1) + } + log.Printf("Consumer error: %v\n", e) } return nil } @@ -173,8 +173,6 @@ func (consumer *Consumer) Close() { } } - - // func (consumer *Consumer) consume( // message func(m *kafka.Message) error, // commit func(c *kafka.Consumer) error, @@ -230,7 +228,6 @@ func (consumer *Consumer) Close() { // } // } - // func (consumer *Consumer) Consume( // message func(key uint64, value []byte) error, // ) error { From 43dc66584fe63fb77c20ff254fbdfb6f65054035 Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Wed, 30 Mar 2022 13:38:49 +0200 
Subject: [PATCH 02/15] fix(tracker-fetch):3.5.2:replaceDefault recursion fix --- tracker/tracker-fetch/LICENSE | 64 ++++++++++++++++++++++-------- tracker/tracker-fetch/package.json | 2 +- tracker/tracker-fetch/src/index.ts | 7 ++-- 3 files changed, 53 insertions(+), 20 deletions(-) diff --git a/tracker/tracker-fetch/LICENSE b/tracker/tracker-fetch/LICENSE index b57f138e0..c5956c893 100644 --- a/tracker/tracker-fetch/LICENSE +++ b/tracker/tracker-fetch/LICENSE @@ -1,19 +1,51 @@ -Copyright (c) 2021 OpenReplay.com +Copyright (c) 2022 Asayer, Inc. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Reach out (license@openreplay.com) if you have any questions regarding the license. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +------------------------------------------------------------------------------------ + +Elastic License 2.0 (ELv2) + +**Acceptance** +By using the software, you agree to all of the terms and conditions below. + +**Copyright License** +The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below + +**Limitations** +You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software. + +You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key. + +You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law. + +**Patents** +The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company. + +**Notices** +You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms. + +If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software. + +**No Other Rights** +These terms do not imply any licenses other than those expressly granted in these terms. 
+ +**Termination** +If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently. + +**No Liability** +As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim. + +**Definitions** +The *licensor* is the entity offering these terms, and the *software* is the software the licensor makes available under these terms, including any portion of it. + +*you* refers to the individual or entity agreeing to these terms. + +*your company* is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. *control* means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect. + +*your licenses* are all the licenses granted to you for the software under these terms. + +*use* means anything you do with the software requiring one of your licenses. + +*trademark* means trademarks, service marks, and similar rights. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
diff --git a/tracker/tracker-fetch/package.json b/tracker/tracker-fetch/package.json index 11e7bf6ff..0b1373edc 100644 --- a/tracker/tracker-fetch/package.json +++ b/tracker/tracker-fetch/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker-fetch", "description": "Tracker plugin for fetch requests recording ", - "version": "3.5.1", + "version": "3.5.2", "keywords": [ "fetch", "logging", diff --git a/tracker/tracker-fetch/src/index.ts b/tracker/tracker-fetch/src/index.ts index 910d20ec7..93e39cce5 100644 --- a/tracker/tracker-fetch/src/index.ts +++ b/tracker/tracker-fetch/src/index.ts @@ -33,9 +33,10 @@ export default function(opts: Partial<Options> = {}) { }, opts, ); + const origFetch = window.fetch return (app: App | null) => { if (app === null) { - return window.fetch; + return origFetch } const ihOpt = options.ignoreHeaders @@ -45,7 +46,7 @@ const fetch = async (input: RequestInfo, init: RequestInit = {}) => { if (typeof input !== 'string') { - return window.fetch(input, init); + return origFetch(input, init); } if (options.sessionTokenHeader) { const sessionToken = app.getSessionToken(); @@ -63,7 +64,7 @@ } } const startTime = performance.now(); - const response = await window.fetch(input, init); + const response = await origFetch(input, init); const duration = performance.now() - startTime; if (options.failuresOnly && response.status < 400) { return response From ae6f18fafb5233b55d9c14080c2e3b89307bf146 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Wed, 30 Mar 2022 13:40:25 +0200 Subject: [PATCH 03/15] chore(helm): databases make minio installation optional. Signed-off-by: rjshrjndrn --- scripts/helmcharts/databases/Chart.yaml | 4 ++++ scripts/helmcharts/databases/values.yaml | 3 +++ 2 files changed, 7 insertions(+) diff --git a/scripts/helmcharts/databases/Chart.yaml b/scripts/helmcharts/databases/Chart.yaml index 0aa62594a..24472a68f 100644 --- a/scripts/helmcharts/databases/Chart.yaml +++ b/scripts/helmcharts/databases/Chart.yaml @@ -40,3 +40,7 @@ dependencies: repository: file://charts/redis version: 12.10.1 condition: redis.enabled + - name: minio + repository: file://charts/minio + version: 3.7.14 + condition: minio.enabled diff --git a/scripts/helmcharts/databases/values.yaml b/scripts/helmcharts/databases/values.yaml index 6c7f28960..6b106e15a 100644 --- a/scripts/helmcharts/databases/values.yaml +++ b/scripts/helmcharts/databases/values.yaml @@ -99,6 +99,9 @@ redis: cpu: 100m memory: 128Mi +minio: + enabled: true + postgresql: # postgresqlPassword: asayerPostgres fullnameOverride: postgresql From 8c975769702f7d355aecc391c211156f0ce8505d Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Wed, 30 Mar 2022 18:27:16 +0200 Subject: [PATCH 04/15] codefix(backend): autoCommit as init consumer parameter --- backend/pkg/queue/import.go | 5 +-- backend/pkg/queue/messages.go | 5 +-- backend/pkg/queue/types/types.go | 8 +--- backend/pkg/redisstream/consumer.go | 47 +++++++++------------ backend/services/assets/main.go | 18 ++++---- backend/services/db/main.go | 2 +- backend/services/ender/main.go | 21 +++++----- backend/services/sink/main.go | 64 ++++++++++++++--------------- backend/services/storage/main.go | 48 +++++++++------------- ee/backend/pkg/kafka/consumer.go | 28 ++++++++----- ee/backend/pkg/queue/import.go | 7 ++-- 11 files changed, 117 insertions(+), 136 deletions(-) diff --git a/backend/pkg/queue/import.go b/backend/pkg/queue/import.go index 2bca9c8fd..623d301ca 100644 ---
a/backend/pkg/queue/import.go +++ b/backend/pkg/queue/import.go @@ -1,15 +1,14 @@ package queue import ( - "openreplay/backend/pkg/redisstream" "openreplay/backend/pkg/queue/types" + "openreplay/backend/pkg/redisstream" ) -func NewConsumer(group string, topics []string, handler types.MessageHandler) types.Consumer { +func NewConsumer(group string, topics []string, handler types.MessageHandler, _ bool) types.Consumer { return redisstream.NewConsumer(group, topics, handler) } func NewProducer() types.Producer { return redisstream.NewProducer() } - diff --git a/backend/pkg/queue/messages.go b/backend/pkg/queue/messages.go index eca4a4d49..0ab184ee6 100644 --- a/backend/pkg/queue/messages.go +++ b/backend/pkg/queue/messages.go @@ -7,13 +7,12 @@ import ( "openreplay/backend/pkg/queue/types" ) - -func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler) types.Consumer { +func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler, autoCommit bool) types.Consumer { return NewConsumer(group, topics, func(sessionID uint64, value []byte, meta *types.Meta) { if err := messages.ReadBatch(value, func(msg messages.Message) { handler(sessionID, msg, meta) }); err != nil { log.Printf("Decode error: %v\n", err) } - }) + }, autoCommit) } diff --git a/backend/pkg/queue/types/types.go b/backend/pkg/queue/types/types.go index b671323d0..600babe25 100644 --- a/backend/pkg/queue/types/types.go +++ b/backend/pkg/queue/types/types.go @@ -6,26 +6,22 @@ import ( type Consumer interface { ConsumeNext() error - DisableAutoCommit() Commit() error CommitBack(gap int64) error Close() } - type Producer interface { Produce(topic string, key uint64, value []byte) error Close(timeout int) Flush(timeout int) } - type Meta struct { - ID uint64 - Topic string + ID uint64 + Topic string Timestamp int64 } type MessageHandler func(uint64, []byte, *Meta) type DecodedMessageHandler func(uint64, messages.Message, *Meta) - diff --git a/backend/pkg/redisstream/consumer.go b/backend/pkg/redisstream/consumer.go index 164ee9236..d32972981 100644 --- a/backend/pkg/redisstream/consumer.go +++ b/backend/pkg/redisstream/consumer.go @@ -1,24 +1,22 @@ package redisstream import ( + "log" "net" + "sort" "strconv" "strings" - "log" - "sort" "time" - "github.com/pkg/errors" _redis "github.com/go-redis/redis" + "github.com/pkg/errors" "openreplay/backend/pkg/queue/types" ) - - -type idsInfo struct{ - id []string - ts []int64 +type idsInfo struct { + id []string + ts []int64 } type streamPendingIDsMap map[string]*idsInfo @@ -41,26 +39,25 @@ func NewConsumer(group string, streams []string, messageHandler types.MessageHan } } - idsPending := make(streamPendingIDsMap) streamsCount := len(streams) for i := 0; i < streamsCount; i++ { - // ">" is for never-delivered messages. - // Otherwise - never acknoledged only + // ">" is for never-delivered messages. 
+ // Otherwise - never acknowledged only // TODO: understand why in case of "0" it eats 100% cpu - streams = append(streams, ">") + streams = append(streams, ">") + idsPending[streams[i]] = new(idsInfo) } return &Consumer{ - redis: redis, + redis: redis, messageHandler: messageHandler, - streams: streams, - group: group, - autoCommit: true, - idsPending: idsPending, + streams: streams, + group: group, + autoCommit: true, + idsPending: idsPending, } } @@ -106,9 +103,9 @@ func (c *Consumer) ConsumeNext() error { return errors.New("Too many messages per ms in redis") } c.messageHandler(sessionID, []byte(valueString), &types.Meta{ - Topic: r.Stream, + Topic: r.Stream, Timestamp: int64(ts), - ID: ts << 13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years + ID: ts<<13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years }) if c.autoCommit { if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil { @@ -119,7 +116,7 @@ c.idsPending[r.Stream].id = append(c.idsPending[r.Stream].id, m.ID) c.idsPending[r.Stream].ts = append(c.idsPending[r.Stream].ts, int64(ts)) } - + } } return nil @@ -158,13 +155,9 @@ func (c *Consumer) CommitBack(gap int64) error { c.idsPending[stream].id = idsInfo.id[maxI:] c.idsPending[stream].ts = idsInfo.ts[maxI:] } - return nil -} - -func (c *Consumer) DisableAutoCommit() { - //c.autoCommit = false + return nil } func (c *Consumer) Close() { // noop -} \ No newline at end of file +} diff --git a/backend/services/assets/main.go b/backend/services/assets/main.go index 450dfc83c..664dc5b09 100644 --- a/backend/services/assets/main.go +++ b/backend/services/assets/main.go @@ -18,7 +18,7 @@ import ( func main() { log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) - GROUP_CACHE := env.String("GROUP_CACHE") + GROUP_CACHE := env.String("GROUP_CACHE") TOPIC_CACHE := env.String("TOPIC_CACHE") cacher := cacher.NewCacher( @@ -29,10 +29,10 @@ func main() { ) consumer := queue.NewMessageConsumer( - GROUP_CACHE, - []string{ TOPIC_CACHE }, + GROUP_CACHE, + []string{TOPIC_CACHE}, func(sessionID uint64, message messages.Message, e *types.Meta) { - switch msg := message.(type) { + switch msg := message.(type) { case *messages.AssetCache: cacher.CacheURL(sessionID, msg.URL) case *messages.ErrorEvent: @@ -47,17 +47,17 @@ func main() { for _, source := range sourceList { cacher.CacheJSFile(source) } - } + } }, + true, ) - tick := time.Tick(20 * time.Minute) sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) - log.Printf("Cacher service started\n") + log.Printf("Cacher service started\n") for { select { case sig := <-sigchan: @@ -74,4 +74,4 @@ } } } -} \ No newline at end of file +} diff --git a/backend/services/db/main.go b/backend/services/db/main.go index d6190a4f0..2ad6e4aa8 100644 --- a/backend/services/db/main.go +++ b/backend/services/db/main.go @@ -74,8 +74,8 @@ func main() { } }) }, + false, ) - consumer.DisableAutoCommit() sigchan := make(chan os.Signal, 1) signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) diff --git a/backend/services/ender/main.go b/backend/services/ender/main.go index e8d739f0e..f0f139dce 100644 --- a/backend/services/ender/main.go +++ b/backend/services/ender/main.go @@ -8,12 +8,12 @@ import ( "os/signal" "syscall" - "openreplay/backend/pkg/intervals" "openreplay/backend/pkg/env" + "openreplay/backend/pkg/intervals" + logger "openreplay/backend/pkg/log" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue/types" - logger "openreplay/backend/pkg/log" "openreplay/backend/services/ender/builder" ) @@ -29,24 +29,24 @@ func main() { producer := queue.NewProducer() consumer := queue.NewMessageConsumer( - GROUP_EVENTS, - []string{ + GROUP_EVENTS, + []string{ env.String("TOPIC_RAW_WEB"), env.String("TOPIC_RAW_IOS"), - }, + }, func(sessionID uint64, msg messages.Message, meta *types.Meta) { statsLogger.HandleAndLog(sessionID, meta) builderMap.HandleMessage(sessionID, msg, msg.Meta().Index) }, + false, ) - consumer.DisableAutoCommit() - + tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond) sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) - log.Printf("Ender service started\n") + log.Printf("Ender service started\n") for { select { case sig := <-sigchan: @@ -55,7 +55,7 @@ func main() { consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP) consumer.Close() os.Exit(0) - case <- tick: + case <-tick: builderMap.IterateReadyMessages(time.Now().UnixNano()/1e6, func(sessionID uint64, readyMsg messages.Message) { producer.Produce(TOPIC_TRIGGER, sessionID, messages.Encode(readyMsg)) }) @@ -69,4 +69,3 @@ func main() { } } } - diff --git a/backend/services/sink/main.go b/backend/services/sink/main.go index 5893e93e6..a649bb6ef 100644 --- a/backend/services/sink/main.go +++ b/backend/services/sink/main.go @@ -1,8 +1,8 @@ package main import ( - "log" "encoding/binary" + "log" "time" "os" @@ -10,67 +10,64 @@ import ( "syscall" "openreplay/backend/pkg/env" + . "openreplay/backend/pkg/messages" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue/types" - . "openreplay/backend/pkg/messages" ) - - func main() { log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) - FS_DIR := env.String("FS_DIR"); + FS_DIR := env.String("FS_DIR") if _, err := os.Stat(FS_DIR); os.IsNotExist(err) { log.Fatalf("%v doesn't exist. 
%v", FS_DIR, err) } writer := NewWriter(env.Uint16("FS_ULIMIT"), FS_DIR) - count := 0 + count := 0 consumer := queue.NewMessageConsumer( env.String("GROUP_SINK"), - []string{ + []string{ env.String("TOPIC_RAW_WEB"), env.String("TOPIC_RAW_IOS"), - }, - func(sessionID uint64, message Message, _ *types.Meta) { - //typeID, err := GetMessageTypeID(value) - // if err != nil { - // log.Printf("Message type decoding error: %v", err) - // return - // } - typeID := message.Meta().TypeID - if !IsReplayerType(typeID) { - return - } + }, + func(sessionID uint64, message Message, _ *types.Meta) { + //typeID, err := GetMessageTypeID(value) + // if err != nil { + // log.Printf("Message type decoding error: %v", err) + // return + // } + typeID := message.Meta().TypeID + if !IsReplayerType(typeID) { + return + } - count++ + count++ - value := message.Encode() - var data []byte - if IsIOSType(typeID) { - data = value - } else { + value := message.Encode() + var data []byte + if IsIOSType(typeID) { + data = value + } else { data = make([]byte, len(value)+8) copy(data[8:], value[:]) binary.LittleEndian.PutUint64(data[0:], message.Meta().Index) - } - if err := writer.Write(sessionID, data); err != nil { + } + if err := writer.Write(sessionID, data); err != nil { log.Printf("Writer error: %v\n", err) } - }, + }, + false, ) - consumer.DisableAutoCommit() - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) - tick := time.Tick(30 * time.Second) + tick := time.Tick(30 * time.Second) - log.Printf("Sink service started\n") + log.Printf("Sink service started\n") for { select { case sig := <-sigchan: @@ -85,7 +82,7 @@ func main() { log.Printf("%v messages during 30 sec", count) count = 0 - + consumer.Commit() default: err := consumer.ConsumeNext() @@ -96,4 +93,3 @@ func main() { } } - diff --git a/backend/services/storage/main.go b/backend/services/storage/main.go index 5033fb845..cd585f10e 100644 --- a/backend/services/storage/main.go +++ b/backend/services/storage/main.go @@ -2,45 +2,41 @@ package main import ( "log" - "time" "os" "strconv" + "time" "os/signal" "syscall" "openreplay/backend/pkg/env" - "openreplay/backend/pkg/storage" "openreplay/backend/pkg/messages" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue/types" + "openreplay/backend/pkg/storage" ) - - func main() { log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) - - storageWeb := storage.NewS3(env.String("AWS_REGION_WEB"), env.String("S3_BUCKET_WEB")) - //storageIos := storage.NewS3(env.String("AWS_REGION_IOS"), env.String("S3_BUCKET_IOS")) + storage := storage.NewS3(env.String("AWS_REGION_WEB"), env.String("S3_BUCKET_WEB")) FS_DIR := env.String("FS_DIR") FS_CLEAN_HRS := env.Int("FS_CLEAN_HRS") - var uploadKey func(string, int, *storage.S3) - uploadKey = func(key string, retryCount int, s *storage.S3) { + var uploadKey func(string, int) + uploadKey = func(key string, retryCount int) { if retryCount <= 0 { - return; + return } file, err := os.Open(FS_DIR + "/" + key) defer file.Close() if err != nil { log.Printf("File error: %v; Will retry %v more time(s)\n", err, retryCount) time.AfterFunc(2*time.Minute, func() { - uploadKey(key, retryCount - 1, s) + uploadKey(key, retryCount-1) }) } else { - if err := s.Upload(gzipFile(file), key, "application/octet-stream", true); err != nil { + if err := storage.Upload(gzipFile(file), key, "application/octet-stream", true); err != nil { log.Fatalf("Storage upload error: %v\n", err) } } @@ -48,27 +44,24 
@@ func main() { consumer := queue.NewMessageConsumer( env.String("GROUP_STORAGE"), - []string{ + []string{ env.String("TOPIC_TRIGGER"), - }, - func(sessionID uint64, msg messages.Message, meta *types.Meta) { - switch msg.(type) { - case *messages.SessionEnd: - uploadKey(strconv.FormatUint(sessionID, 10), 5, storageWeb) - //case *messages.IOSSessionEnd: - // uploadKey(strconv.FormatUint(sessionID, 10), 5, storageIos) - } - }, + }, + func(sessionID uint64, msg messages.Message, meta *types.Meta) { + switch msg.(type) { + case *messages.SessionEnd: + uploadKey(strconv.FormatUint(sessionID, 10), 5) + } + }, + true, ) sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + cleanTick := time.Tick(time.Duration(FS_CLEAN_HRS) * time.Hour) - cleanTick := time.Tick(time.Duration(FS_CLEAN_HRS) * time.Hour) - - - log.Printf("Storage service started\n") + log.Printf("Storage service started\n") for { select { case sig := <-sigchan: @@ -85,4 +78,3 @@ func main() { } } } - diff --git a/ee/backend/pkg/kafka/consumer.go b/ee/backend/pkg/kafka/consumer.go index ca0544923..82aa56d50 100644 --- a/ee/backend/pkg/kafka/consumer.go +++ b/ee/backend/pkg/kafka/consumer.go @@ -25,7 +25,12 @@ type Consumer struct { lastKafkaEventTs int64 } -func NewConsumer(group string, topics []string, messageHandler types.MessageHandler) *Consumer { +func NewConsumer( + group string, + topics []string, + messageHandler types.MessageHandler, + autoCommit bool, +) *Consumer { protocol := "plaintext" if env.Bool("KAFKA_USE_SSL") { protocol = "ssl" @@ -53,18 +58,19 @@ func NewConsumer(group string, topics []string, messageHandler types.MessageHand log.Fatalln(err) } + var commitTicker *time.Ticker + if autoCommit { + commitTicker = time.NewTicker(2 * time.Minute) + } + return &Consumer{ c: c, messageHandler: messageHandler, - commitTicker: time.NewTicker(2 * time.Minute), + commitTicker: commitTicker, pollTimeout: 200, } } -func (consumer *Consumer) DisableAutoCommit() { - consumer.commitTicker.Stop() -} - func (consumer *Consumer) Commit() error { consumer.c.Commit() // TODO: return error if it is not "No offset stored" return nil @@ -128,10 +134,12 @@ func (consumer *Consumer) ConsumeNext() error { return nil } - select { - case <-consumer.commitTicker.C: - consumer.Commit() - default: + if consumer.commitTicker != nil { + select { + case <-consumer.commitTicker.C: + consumer.Commit() + default: + } } switch e := ev.(type) { diff --git a/ee/backend/pkg/queue/import.go b/ee/backend/pkg/queue/import.go index abff07e9a..e95eb11e5 100644 --- a/ee/backend/pkg/queue/import.go +++ b/ee/backend/pkg/queue/import.go @@ -2,17 +2,16 @@ package queue import ( "openreplay/backend/pkg/kafka" - "openreplay/backend/pkg/queue/types" "openreplay/backend/pkg/license" + "openreplay/backend/pkg/queue/types" ) -func NewConsumer(group string, topics []string, handler types.MessageHandler) types.Consumer { +func NewConsumer(group string, topics []string, handler types.MessageHandler, autoCommit bool) types.Consumer { license.CheckLicense() - return kafka.NewConsumer(group, topics, handler) + return kafka.NewConsumer(group, topics, handler, autoCommit) } func NewProducer() types.Producer { license.CheckLicense() return kafka.NewProducer() } - From daa5a7fad5ca94eb9041570b241a81666944dfd0 Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Wed, 30 Mar 2022 18:30:33 +0200 Subject: [PATCH 05/15] codefix(backend): code-format & Close() body on /i --- 
backend/services/http/handlers_web.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/backend/services/http/handlers_web.go b/backend/services/http/handlers_web.go index 09d2511d8..6020c3eb1 100644 --- a/backend/services/http/handlers_web.go +++ b/backend/services/http/handlers_web.go @@ -11,8 +11,8 @@ import ( "time" "openreplay/backend/pkg/db/postgres" - "openreplay/backend/pkg/token" . "openreplay/backend/pkg/messages" + "openreplay/backend/pkg/token" ) func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { @@ -30,12 +30,12 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { UserID string `json:"userID"` } type response struct { - Timestamp int64 `json:"timestamp"` - Delay int64 `json:"delay"` - Token string `json:"token"` - UserUUID string `json:"userUUID"` - SessionID string `json:"sessionID"` - BeaconSizeLimit int64 `json:"beaconSizeLimit"` + Timestamp int64 `json:"timestamp"` + Delay int64 `json:"delay"` + Token string `json:"token"` + UserUUID string `json:"userUUID"` + SessionID string `json:"sessionID"` + BeaconSizeLimit int64 `json:"beaconSizeLimit"` } startTime := time.Now() @@ -102,7 +102,7 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { UserCountry: country, UserDeviceMemorySize: req.DeviceMemory, UserDeviceHeapSize: req.JsHeapSizeLimit, - UserID: req.UserID, + UserID: req.UserID, })) } @@ -110,9 +110,9 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { responseWithJSON(w, &response{ //Timestamp: startTime.UnixNano() / 1e6, //Delay: delayDuration.Nanoseconds() / 1e6, - Token: tokenizer.Compose(*tokenData), - UserUUID: userUUID, - SessionID: strconv.FormatUint(tokenData.ID, 10), + Token: tokenizer.Compose(*tokenData), + UserUUID: userUUID, + SessionID: strconv.FormatUint(tokenData.ID, 10), BeaconSizeLimit: BEACON_SIZE_LIMIT, }) } @@ -124,7 +124,7 @@ func pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) { return } body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT) - //defer body.Close() + defer body.Close() buf, err := ioutil.ReadAll(body) if err != nil { responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging @@ -248,4 +248,4 @@ func notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) { log.Printf("Unable to insert Unstarted Session: %v\n", err) } w.WriteHeader(http.StatusOK) -} \ No newline at end of file +} From 9e294d4edd6f52a67eab190e3f3772af7b0d460a Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 31 Mar 2022 18:32:27 +0200 Subject: [PATCH 06/15] fix(nginx): socketio endpoint directly from kube service nginx upstream block has some issue with forwarding the socketio connection Signed-off-by: rjshrjndrn --- .../openreplay/charts/nginx-ingress/templates/configMap.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml index 6780c9ee6..6322318e3 100644 --- a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml @@ -82,7 +82,7 @@ data: proxy_set_header Host $host; proxy_set_header X-Forwarded-For $origin_forwarded_ip; proxy_set_header X-Real-IP $origin_forwarded_ip; - proxy_pass http://utilities-pool; + proxy_pass http://utilities-openreplay.app.svc.cluster.local:9001; } location /assets/ { rewrite 
^/assets/(.*) /sessions-assets/$1 break; From 77fc8221ac6b49631ff92b8f7cc75558f917cd81 Mon Sep 17 00:00:00 2001 From: rjshrjndrn Date: Thu, 31 Mar 2022 19:50:07 +0200 Subject: [PATCH 07/15] chore(helm): moving utils default vars Signed-off-by: rjshrjndrn --- scripts/helmcharts/openreplay/charts/utilities/values.yaml | 3 +++ scripts/helmcharts/vars.yaml | 7 ------- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/scripts/helmcharts/openreplay/charts/utilities/values.yaml b/scripts/helmcharts/openreplay/charts/utilities/values.yaml index 87b3d771d..8265b5aa5 100644 --- a/scripts/helmcharts/openreplay/charts/utilities/values.yaml +++ b/scripts/helmcharts/openreplay/charts/utilities/values.yaml @@ -83,6 +83,9 @@ autoscaling: env: REDIS_URL: "redis://redis-master.db.svc.cluster.local:6379" + debug: 0 + uws: false + redis: false nodeSelector: {} diff --git a/scripts/helmcharts/vars.yaml b/scripts/helmcharts/vars.yaml index e4ed88711..24ea9344d 100644 --- a/scripts/helmcharts/vars.yaml +++ b/scripts/helmcharts/vars.yaml @@ -95,13 +95,6 @@ chalice: # idp_name: '' # idp_tenantKey: '' -utilities: - replicaCount: 1 - env: - debug: 0 - uws: false - redis: false - # If you want to override something # chartname: # filedFrom chart/Values.yaml: From a0a82e0d77a75d6154ea6fffc5e58128880fa117 Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Fri, 1 Apr 2022 17:19:46 +0200 Subject: [PATCH 08/15] codeclean(backend): remove alert.go --- backend/pkg/db/postgres/alert.go | 228 ------------------------------- 1 file changed, 228 deletions(-) delete mode 100644 backend/pkg/db/postgres/alert.go diff --git a/backend/pkg/db/postgres/alert.go b/backend/pkg/db/postgres/alert.go deleted file mode 100644 index 964977bd3..000000000 --- a/backend/pkg/db/postgres/alert.go +++ /dev/null @@ -1,228 +0,0 @@ -package postgres - -import ( - "database/sql" - "errors" - "fmt" - sq "github.com/Masterminds/squirrel" - "log" - "strconv" - "time" -) - -type TimeString sql.NullString -type query struct { - Left string `db:"query.left" json:"left"` - Operator string `db:"query.operator" json:"operator"` - Right float64 `db:"query.right" json:"right"` -} -type options struct { - RenotifyInterval int64 `db:"options.renotifyInterval" json:"renotifyInterval"` - LastNotification int64 `db:"options.lastNotification" json:"lastNotification;omitempty"` - CurrentPeriod int64 `db:"options.currentPeriod" json:"currentPeriod"` - PreviousPeriod int64 `db:"options.previousPeriod" json:"previousPeriod;omitempty"` - Message []map[string]string `db:"options.message" json:"message;omitempty"` - Change string `db:"options.change" json:"change;omitempty"` -} -type Alert struct { - AlertID uint32 `db:"alert_id" json:"alert_id"` - ProjectID uint32 `db:"project_id" json:"project_id"` - Name string `db:"name" json:"name"` - Description sql.NullString `db:"description" json:"description"` - Active bool `db:"active" json:"active"` - DetectionMethod string `db:"detection_method" json:"detection_method"` - Query query `db:"query" json:"query"` - DeletedAt *int64 `db:"deleted_at" json:"deleted_at"` - CreatedAt *int64 `db:"created_at" json:"created_at"` - Options options `db:"options" json:"options"` - TenantId uint32 `db:"tenant_id" json:"tenant_id"` -} - -func (pg *Conn) IterateAlerts(iter func(alert *Alert, err error)) error { - rows, err := pg.query(` - SELECT - alerts.alert_id, - alerts.project_id, - alerts.name, - alerts.description, - alerts.active, - alerts.detection_method, - alerts.query, - CAST(EXTRACT(epoch FROM alerts.deleted_at) * 1000 AS BIGINT) 
AS deleted_at, - CAST(EXTRACT(epoch FROM alerts.created_at) * 1000 AS BIGINT) AS created_at, - alerts.options, - 0 AS tenant_id - FROM public.alerts - WHERE alerts.active AND alerts.deleted_at ISNULL; - `) - if err != nil { - return err - } - defer rows.Close() - for rows.Next() { - a := new(Alert) - if err = rows.Scan( - &a.AlertID, - &a.ProjectID, - &a.Name, - &a.Description, - &a.Active, - &a.DetectionMethod, - &a.Query, - &a.DeletedAt, - &a.CreatedAt, - &a.Options, - &a.TenantId, - ); err != nil { - iter(nil, err) - continue - } - iter(a, nil) - } - - if err = rows.Err(); err != nil { - return err - } - return nil -} - -func (pg *Conn) SaveLastNotification(allIds []uint32) error { - var paramrefs string - for _, v := range allIds { - paramrefs += strconv.Itoa(int(v)) + `,` - } - paramrefs = paramrefs[:len(paramrefs)-1] // remove last "," - q := "UPDATE public.Alerts SET options = options||'{\"lastNotification\":" + strconv.Itoa(int(time.Now().Unix()*1000)) + "}'::jsonb WHERE alert_id IN (" + paramrefs + ");" - //log.Println(q) - log.Println("Updating PG") - return pg.exec(q) -} - -type columnDefinition struct { - table string - formula string - condition string - group string -} - -var LeftToDb = map[string]columnDefinition{ - "performance.dom_content_loaded.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"}, - "performance.first_meaningful_paint.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"}, - "performance.page_load_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(load_time ,0))"}, - "performance.dom_build_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(dom_building_time,0))"}, - "performance.speed_index.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(speed_index,0))"}, - "performance.page_response_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(response_time,0))"}, - "performance.ttfb.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(first_paint_time,0))"}, - "performance.time_to_render.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(visually_complete,0))"}, - "performance.image_load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='img'"}, - "performance.request_load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='fetch'"}, - "resources.load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))"}, - "resources.missing.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT url_hostpath)", condition: "success= FALSE"}, - "errors.4xx_5xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100!=2"}, - "errors.4xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=4"}, - "errors.5xx.count": 
{table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=5"}, - "errors.javascript.impacted_sessions.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT session_id)", condition: "success= FALSE AND type='script'"}, - "performance.crashes.count": {table: "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions", formula: "COUNT(DISTINCT session_id)", condition: "errors_count > 0"}, - "errors.javascript.count": {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source='js_exception'"}, - "errors.backend.count": {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source!='js_exception'"}, -} - -//This is the frequency of execution for each threshold -var TimeInterval = map[int64]int64{ - 15: 3, - 30: 5, - 60: 10, - 120: 20, - 240: 30, - 1440: 60, -} - -func (a *Alert) CanCheck() bool { - now := time.Now().Unix() * 1000 - var repetitionBase int64 - - if repetitionBase = a.Options.CurrentPeriod; a.DetectionMethod == "change" && a.Options.CurrentPeriod > a.Options.PreviousPeriod { - repetitionBase = a.Options.PreviousPeriod - } - - if _, ok := TimeInterval[repetitionBase]; !ok { - log.Printf("repetitionBase: %d NOT FOUND", repetitionBase) - return false - } - return a.DeletedAt == nil && a.Active && - (a.Options.RenotifyInterval <= 0 || - a.Options.LastNotification <= 0 || - ((now - a.Options.LastNotification) > a.Options.RenotifyInterval*60*1000)) && - ((now-*a.CreatedAt)%(TimeInterval[repetitionBase]*60*1000)) < 60*1000 -} - -func (a *Alert) Build() (sq.SelectBuilder, error) { - colDef, ok := LeftToDb[a.Query.Left] - if !ok { - return sq.Select(), errors.New(fmt.Sprintf("!! unsupported metric '%s' from alert: %d:%s\n", a.Query.Left, a.AlertID, a.Name)) - } - - subQ := sq. - Select(colDef.formula + " AS value"). - From(colDef.table). - Where(sq.And{sq.Expr("project_id = $1 ", a.ProjectID), - sq.Expr(colDef.condition)}) - q := sq.Select(fmt.Sprint("value, coalesce(value,0)", a.Query.Operator, a.Query.Right, " AS valid")) - if len(colDef.group) > 0 { - subQ = subQ.Column(colDef.group + " AS group_value") - subQ = subQ.GroupBy(colDef.group) - q = q.Column("group_value") - } - - if a.DetectionMethod == "threshold" { - q = q.FromSelect(subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)), "stat") - } else if a.DetectionMethod == "change" { - if a.Options.Change == "change" { - if len(colDef.group) == 0 { - sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql() - sub2, args2, _ := subQ.Where( - sq.And{ - sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60), - sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60), - }).ToSql() - sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")-(" + sub2 + ")) AS value").ToSql() - q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...) 
- } else { - subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)) - sub2, args2, _ := subQ.Where( - sq.And{ - sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60), - sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60), - }).ToSql() - sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...) - q = q.FromSelect(sub1, "stat") - } - } else if a.Options.Change == "percent" { - if len(colDef.group) == 0 { - sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql() - sub2, args2, _ := subQ.Where( - sq.And{ - sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60), - sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60), - }).ToSql() - sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")/(" + sub2 + ")-1)*100 AS value").ToSql() - q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...) - } else { - subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)) - sub2, args2, _ := subQ.Where( - sq.And{ - sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60), - sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60), - }).ToSql() - sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...) - q = q.FromSelect(sub1, "stat") - } - } else { - return q, errors.New("unsupported change method") - } - - } else { - return q, errors.New("unsupported detection method") - } - return q, nil -} \ No newline at end of file From 197c39f96ac83200f67d0ff40db86a93373b19eb Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Fri, 1 Apr 2022 19:14:08 +0200 Subject: [PATCH 09/15] feat(backend): go v.1.18 upgrade --- backend/Dockerfile | 2 +- backend/go.mod | 43 +++++- backend/go.sum | 21 +-- ...{messages_common.go => messages-common.go} | 24 --- .../{messages_ios.go => messages-ios.go} | 0 .../{messages_web.go => messages-web.go} | 0 .../pkg/db/cache/{pg_cache.go => pg-cache.go} | 0 backend/pkg/db/postgres/listener.go | 25 ---- ...{messages_common.go => messages-common.go} | 0 .../{messages_ios.go => messages-ios.go} | 0 ...ges_web_stats.go => messages-web-stats.go} | 0 .../{messages_web.go => messages-web.go} | 0 ...tarted_session.go => unstarted-session.go} | 0 .../pkg/env/{worker_id.go => worker-id.go} | 0 .../{get_timestamp.go => get-timestamp.go} | 0 ...ansform.go => legacy-message-transform.go} | 0 .../{read_message.go => read-message.go} | 0 ...s_depricated.go => handlers-depricated.go} | 0 .../http/{handlers_ios.go => handlers-ios.go} | 98 ++++++------- .../http/{handlers_web.go => handlers-web.go} | 0 backend/services/http/ios-device.go | 138 ++++++++++++++++++ backend/services/http/ios_device.go | 79 ---------- backend/services/http/main.go | 18 +-- backend/services/http/project_id.go | 12 -- 24 files changed, 235 insertions(+), 225 deletions(-) rename backend/pkg/db/cache/{messages_common.go => messages-common.go} (66%) rename backend/pkg/db/cache/{messages_ios.go => messages-ios.go} (100%) rename backend/pkg/db/cache/{messages_web.go => messages-web.go} (100%) rename backend/pkg/db/cache/{pg_cache.go => pg-cache.go} (100%) rename backend/pkg/db/postgres/{messages_common.go => messages-common.go} (100%) 
rename backend/pkg/db/postgres/{messages_ios.go => messages-ios.go} (100%) rename backend/pkg/db/postgres/{messages_web_stats.go => messages-web-stats.go} (100%) rename backend/pkg/db/postgres/{messages_web.go => messages-web.go} (100%) rename backend/pkg/db/postgres/{unstarted_session.go => unstarted-session.go} (100%) rename backend/pkg/env/{worker_id.go => worker-id.go} (100%) rename backend/pkg/messages/{get_timestamp.go => get-timestamp.go} (100%) rename backend/pkg/messages/{legacy_message_transform.go => legacy-message-transform.go} (100%) rename backend/pkg/messages/{read_message.go => read-message.go} (100%) rename backend/services/http/{handlers_depricated.go => handlers-depricated.go} (100%) rename backend/services/http/{handlers_ios.go => handlers-ios.go} (71%) rename backend/services/http/{handlers_web.go => handlers-web.go} (100%) create mode 100644 backend/services/http/ios-device.go delete mode 100644 backend/services/http/ios_device.go delete mode 100644 backend/services/http/project_id.go diff --git a/backend/Dockerfile b/backend/Dockerfile index 5cefd4cb4..4c31518ef 100644 --- a/backend/Dockerfile +++ b/backend/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.13-alpine3.10 AS prepare +FROM golang:1.18-alpine3.15 AS prepare RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash diff --git a/backend/go.mod b/backend/go.mod index ab98ca444..6588529a8 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -1,14 +1,12 @@ module openreplay/backend -go 1.13 +go 1.18 require ( cloud.google.com/go/logging v1.4.2 github.com/ClickHouse/clickhouse-go v1.4.3 - github.com/Masterminds/squirrel v1.5.0 github.com/aws/aws-sdk-go v1.35.23 github.com/btcsuite/btcutil v1.0.2 - github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect github.com/elastic/go-elasticsearch/v7 v7.13.1 github.com/go-redis/redis v6.15.9+incompatible github.com/google/uuid v1.1.2 @@ -16,14 +14,47 @@ require ( github.com/jackc/pgconn v1.6.0 github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451 github.com/jackc/pgx/v4 v4.6.0 - github.com/klauspost/compress v1.11.9 // indirect github.com/klauspost/pgzip v1.2.5 - github.com/lib/pq v1.2.0 github.com/oschwald/maxminddb-golang v1.7.0 github.com/pkg/errors v0.9.1 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe + golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 google.golang.org/api v0.50.0 gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 - +) + +require ( + cloud.google.com/go v0.84.0 // indirect + github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect + github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect + github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/googleapis/gax-go/v2 v2.0.5 // indirect + github.com/jackc/chunkreader/v2 v2.0.1 // indirect + github.com/jackc/pgio v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgproto3/v2 v2.0.2 // indirect + github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect + github.com/jackc/pgtype v1.3.0 // indirect + github.com/jackc/puddle v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jstemmer/go-junit-report v0.9.1 // indirect + github.com/klauspost/compress v1.11.9 // indirect + go.opencensus.io v0.23.0 // indirect + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 
// indirect + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect + golang.org/x/mod v0.4.2 // indirect + golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect + golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect + golang.org/x/text v0.3.6 // indirect + golang.org/x/tools v0.1.4 // indirect + golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 // indirect + google.golang.org/grpc v1.38.0 // indirect + google.golang.org/protobuf v1.26.0 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index 8d538a0b4..607936204 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -46,8 +46,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc= github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= -github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= -github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/aws/aws-sdk-go v1.35.23 h1:SCP0d0XvyJTDmfnHEQPvBaYi3kea1VNUo7uQmkVgFts= github.com/aws/aws-sdk-go v1.35.23/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= @@ -75,8 +73,8 @@ github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnht github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= -github.com/confluentinc/confluent-kafka-go v1.5.2 h1:l+qt+a0Okmq0Bdr1P55IX4fiwFJyg0lZQmfHkAFkv7E= -github.com/confluentinc/confluent-kafka-go v1.5.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg= +github.com/confluentinc/confluent-kafka-go v1.7.0 h1:tXh3LWb2Ne0WiU3ng4h5qiGA9XV61rz46w60O+cq8bM= +github.com/confluentinc/confluent-kafka-go v1.7.0/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= @@ -93,7 +91,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod 
h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -135,7 +132,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -152,11 +148,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -184,7 +178,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -203,7 +196,6 @@ github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye47 github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= -github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= github.com/jackc/pgproto3/v2 
v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= @@ -219,7 +211,6 @@ github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCM github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= github.com/jackc/pgtype v1.3.0 h1:l8JvKrby3RI7Kg3bYEeU9TA4vqC38QDpFCfcrC7KuN0= github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik= -github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o= github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= @@ -254,10 +245,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= -github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= -github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= @@ -682,8 +669,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/confluentinc/confluent-kafka-go.v1 v1.5.2 h1:g0WBLy6fobNUU8W/e9zx6I0Yl79Ya+BDW1NwzAlTiiQ= -gopkg.in/confluentinc/confluent-kafka-go.v1 v1.5.2/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY= +gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 h1:+RlmciBLDd/XwM1iudiG3HtCg45purnsOxEoY/+JZdQ= +gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= diff --git a/backend/pkg/db/cache/messages_common.go b/backend/pkg/db/cache/messages-common.go similarity index 66% rename from backend/pkg/db/cache/messages_common.go rename to backend/pkg/db/cache/messages-common.go index 3983982fe..8ca7b2f85 100644 --- a/backend/pkg/db/cache/messages_common.go +++ b/backend/pkg/db/cache/messages-common.go @@ -28,30 +28,6 @@ func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error { return c.Conn.InsertIssueEvent(sessionID, session.ProjectID, crash) } -func (c *PGCache) InsertUserID(sessionID uint64, userID *IOSUserID) 
error { - if err := c.Conn.InsertIOSUserID(sessionID, userID); err != nil { - return err - } - session, err := c.GetSession(sessionID) - if err != nil { - return err - } - session.UserID = &userID.Value - return nil -} - -func (c *PGCache) InsertUserAnonymousID(sessionID uint64, userAnonymousID *IOSUserAnonymousID) error { - if err := c.Conn.InsertIOSUserAnonymousID(sessionID, userAnonymousID); err != nil { - return err - } - session, err := c.GetSession(sessionID) - if err != nil { - return err - } - session.UserAnonymousID = &userAnonymousID.Value - return nil -} - func (c *PGCache) InsertMetadata(sessionID uint64, metadata *Metadata) error { session, err := c.GetSession(sessionID) if err != nil { diff --git a/backend/pkg/db/cache/messages_ios.go b/backend/pkg/db/cache/messages-ios.go similarity index 100% rename from backend/pkg/db/cache/messages_ios.go rename to backend/pkg/db/cache/messages-ios.go diff --git a/backend/pkg/db/cache/messages_web.go b/backend/pkg/db/cache/messages-web.go similarity index 100% rename from backend/pkg/db/cache/messages_web.go rename to backend/pkg/db/cache/messages-web.go diff --git a/backend/pkg/db/cache/pg_cache.go b/backend/pkg/db/cache/pg-cache.go similarity index 100% rename from backend/pkg/db/cache/pg_cache.go rename to backend/pkg/db/cache/pg-cache.go diff --git a/backend/pkg/db/postgres/listener.go b/backend/pkg/db/postgres/listener.go index f90d83485..ef99c2c59 100644 --- a/backend/pkg/db/postgres/listener.go +++ b/backend/pkg/db/postgres/listener.go @@ -11,7 +11,6 @@ import ( type Listener struct { conn *pgx.Conn Integrations chan *Integration - Alerts chan *Alert Errors chan error } @@ -32,23 +31,6 @@ func NewIntegrationsListener(url string) (*Listener, error) { return listener, nil } -func NewAlertsListener(url string) (*Listener, error) { - conn, err := pgx.Connect(context.Background(), url) - if err != nil { - return nil, err - } - listener := &Listener{ - conn: conn, - Errors: make(chan error), - } - listener.Alerts = make(chan *Alert, 50) - if _, err := conn.Exec(context.Background(), "LISTEN alert"); err != nil { - return nil, err - } - go listener.listen() - return listener, nil -} - func (listener *Listener) listen() { for { notification, err := listener.conn.WaitForNotification(context.Background()) @@ -64,13 +46,6 @@ func (listener *Listener) listen() { } else { listener.Integrations <- integrationP } - case "alert": - alertP := new(Alert) - if err := json.Unmarshal([]byte(notification.Payload), alertP); err != nil { - listener.Errors <- fmt.Errorf("%v | Payload: %v", err, notification.Payload) - } else { - listener.Alerts <- alertP - } } } } diff --git a/backend/pkg/db/postgres/messages_common.go b/backend/pkg/db/postgres/messages-common.go similarity index 100% rename from backend/pkg/db/postgres/messages_common.go rename to backend/pkg/db/postgres/messages-common.go diff --git a/backend/pkg/db/postgres/messages_ios.go b/backend/pkg/db/postgres/messages-ios.go similarity index 100% rename from backend/pkg/db/postgres/messages_ios.go rename to backend/pkg/db/postgres/messages-ios.go diff --git a/backend/pkg/db/postgres/messages_web_stats.go b/backend/pkg/db/postgres/messages-web-stats.go similarity index 100% rename from backend/pkg/db/postgres/messages_web_stats.go rename to backend/pkg/db/postgres/messages-web-stats.go diff --git a/backend/pkg/db/postgres/messages_web.go b/backend/pkg/db/postgres/messages-web.go similarity index 100% rename from backend/pkg/db/postgres/messages_web.go rename to 
backend/pkg/db/postgres/messages-web.go diff --git a/backend/pkg/db/postgres/unstarted_session.go b/backend/pkg/db/postgres/unstarted-session.go similarity index 100% rename from backend/pkg/db/postgres/unstarted_session.go rename to backend/pkg/db/postgres/unstarted-session.go diff --git a/backend/pkg/env/worker_id.go b/backend/pkg/env/worker-id.go similarity index 100% rename from backend/pkg/env/worker_id.go rename to backend/pkg/env/worker-id.go diff --git a/backend/pkg/messages/get_timestamp.go b/backend/pkg/messages/get-timestamp.go similarity index 100% rename from backend/pkg/messages/get_timestamp.go rename to backend/pkg/messages/get-timestamp.go diff --git a/backend/pkg/messages/legacy_message_transform.go b/backend/pkg/messages/legacy-message-transform.go similarity index 100% rename from backend/pkg/messages/legacy_message_transform.go rename to backend/pkg/messages/legacy-message-transform.go diff --git a/backend/pkg/messages/read_message.go b/backend/pkg/messages/read-message.go similarity index 100% rename from backend/pkg/messages/read_message.go rename to backend/pkg/messages/read-message.go diff --git a/backend/services/http/handlers_depricated.go b/backend/services/http/handlers-depricated.go similarity index 100% rename from backend/services/http/handlers_depricated.go rename to backend/services/http/handlers-depricated.go diff --git a/backend/services/http/handlers_ios.go b/backend/services/http/handlers-ios.go similarity index 71% rename from backend/services/http/handlers_ios.go rename to backend/services/http/handlers-ios.go index 6c3f945bd..affcab59d 100644 --- a/backend/services/http/handlers_ios.go +++ b/backend/services/http/handlers-ios.go @@ -2,50 +2,50 @@ package main import ( "encoding/json" - "net/http" "errors" - "time" - "math/rand" - "strconv" "log" + "math/rand" + "net/http" + "strconv" + "time" "openreplay/backend/pkg/db/postgres" - "openreplay/backend/pkg/token" . 
"openreplay/backend/pkg/messages" + "openreplay/backend/pkg/token" ) -const FILES_SIZE_LIMIT int64 = 1e7 // 10Mb +const FILES_SIZE_LIMIT int64 = 1e7 // 10Mb func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) { type request struct { - Token string `json:"token"` - ProjectKey *string `json:"projectKey"` - TrackerVersion string `json:"trackerVersion"` - RevID string `json:"revID"` - UserUUID *string `json:"userUUID"` + Token string `json:"token"` + ProjectKey *string `json:"projectKey"` + TrackerVersion string `json:"trackerVersion"` + RevID string `json:"revID"` + UserUUID *string `json:"userUUID"` //UserOS string `json"userOS"` //hardcoded 'MacOS' - UserOSVersion string `json:"userOSVersion"` - UserDevice string `json:"userDevice"` - Timestamp uint64 `json:"timestamp"` + UserOSVersion string `json:"userOSVersion"` + UserDevice string `json:"userDevice"` + Timestamp uint64 `json:"timestamp"` // UserDeviceType uint 0:phone 1:pad 2:tv 3:carPlay 5:mac // “performances”:{ - // “activeProcessorCount”:8, - // “isLowPowerModeEnabled”:0, - // “orientation”:0, - // “systemUptime”:585430, - // “batteryState”:0, - // “thermalState”:0, - // “batteryLevel”:0, - // “processorCount”:8, - // “physicalMemory”:17179869184 - // }, + // “activeProcessorCount”:8, + // “isLowPowerModeEnabled”:0, + // “orientation”:0, + // “systemUptime”:585430, + // “batteryState”:0, + // “thermalState”:0, + // “batteryLevel”:0, + // “processorCount”:8, + // “physicalMemory”:17179869184 + // }, } type response struct { - Token string `json:"token"` - ImagesHashList []string `json:"imagesHashList"` - UserUUID string `json:"userUUID"` - BeaconSizeLimit int64 `json:"beaconSizeLimit"` - SessionID string `json:"sessionID"` + Token string `json:"token"` + ImagesHashList []string `json:"imagesHashList"` + UserUUID string `json:"userUUID"` + BeaconSizeLimit int64 `json:"beaconSizeLimit"` + SessionID string `json:"sessionID"` } startTime := time.Now() req := &request{} @@ -98,16 +98,16 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) { // The difference with web is mostly here: producer.Produce(TOPIC_RAW_IOS, tokenData.ID, Encode(&IOSSessionStart{ - Timestamp: req.Timestamp, - ProjectID: uint64(p.ProjectID), - TrackerVersion: req.TrackerVersion, - RevID: req.RevID, - UserUUID: userUUID, - UserOS: "IOS", - UserOSVersion: req.UserOSVersion, - UserDevice: MapIOSDevice(req.UserDevice), - UserDeviceType: GetIOSDeviceType(req.UserDevice), - UserCountry: country, + Timestamp: req.Timestamp, + ProjectID: uint64(p.ProjectID), + TrackerVersion: req.TrackerVersion, + RevID: req.RevID, + UserUUID: userUUID, + UserOS: "IOS", + UserOSVersion: req.UserOSVersion, + UserDevice: MapIOSDevice(req.UserDevice), + UserDeviceType: GetIOSDeviceType(req.UserDevice), + UserCountry: country, })) } @@ -119,14 +119,13 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) { responseWithJSON(w, &response{ // ImagesHashList: imagesHashList, - Token: tokenizer.Compose(*tokenData), - UserUUID: userUUID, - SessionID: strconv.FormatUint(tokenData.ID, 10), + Token: tokenizer.Compose(*tokenData), + UserUUID: userUUID, + SessionID: strconv.FormatUint(tokenData.ID, 10), BeaconSizeLimit: BEACON_SIZE_LIMIT, }) } - func pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { sessionData, err := tokenizer.ParseFromHTTPRequest(r) if err != nil { @@ -136,8 +135,6 @@ func pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { pushMessages(w, r, sessionData.ID, TOPIC_RAW_IOS) } - - func 
pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { sessionData, err := tokenizer.ParseFromHTTPRequest(r) if err != nil && err != token.EXPIRED { @@ -145,10 +142,9 @@ func pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) { return } // Check timestamps here? - pushMessages(w, r, sessionData.ID,TOPIC_RAW_IOS) + pushMessages(w, r, sessionData.ID, TOPIC_RAW_IOS) } - func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { log.Printf("recieved imagerequest") @@ -163,12 +159,12 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { err = r.ParseMultipartForm(1e6) // ~1Mb if err == http.ErrNotMultipart || err == http.ErrMissingBoundary { responseWithError(w, http.StatusUnsupportedMediaType, err) - // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB + // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB } else if err != nil { responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging } - if (r.MultipartForm == nil) { + if r.MultipartForm == nil { responseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed")) } @@ -177,7 +173,7 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { return } - prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/" + prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/" for _, fileHeaderList := range r.MultipartForm.File { for _, fileHeader := range fileHeaderList { @@ -187,7 +183,7 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { } key := prefix + fileHeader.Filename log.Printf("Uploading image... %v", key) - go func() { //TODO: mime type from header + go func() { //TODO: mime type from header if err := s3.Upload(file, key, "image/jpeg", false); err != nil { log.Printf("Upload ios screen error. 
%v", err) } diff --git a/backend/services/http/handlers_web.go b/backend/services/http/handlers-web.go similarity index 100% rename from backend/services/http/handlers_web.go rename to backend/services/http/handlers-web.go diff --git a/backend/services/http/ios-device.go b/backend/services/http/ios-device.go new file mode 100644 index 000000000..bec1f3b36 --- /dev/null +++ b/backend/services/http/ios-device.go @@ -0,0 +1,138 @@ +package main + +import ( + "strings" +) + +func MapIOSDevice(identifier string) string { + switch identifier { + case "iPod5,1": + return "iPod touch (5th generation)" + case "iPod7,1": + return "iPod touch (6th generation)" + case "iPod9,1": + return "iPod touch (7th generation)" + case "iPhone3,1", "iPhone3,2", "iPhone3,3": + return "iPhone 4" + case "iPhone4,1": + return "iPhone 4s" + case "iPhone5,1", "iPhone5,2": + return "iPhone 5" + case "iPhone5,3", "iPhone5,4": + return "iPhone 5c" + case "iPhone6,1", "iPhone6,2": + return "iPhone 5s" + case "iPhone7,2": + return "iPhone 6" + case "iPhone7,1": + return "iPhone 6 Plus" + case "iPhone8,1": + return "iPhone 6s" + case "iPhone8,2": + return "iPhone 6s Plus" + case "iPhone8,4": + return "iPhone SE" + case "iPhone9,1", "iPhone9,3": + return "iPhone 7" + case "iPhone9,2", "iPhone9,4": + return "iPhone 7 Plus" + case "iPhone10,1", "iPhone10,4": + return "iPhone 8" + case "iPhone10,2", "iPhone10,5": + return "iPhone 8 Plus" + case "iPhone10,3", "iPhone10,6": + return "iPhone X" + case "iPhone11,2": + return "iPhone XS" + case "iPhone11,4", "iPhone11,6": + return "iPhone XS Max" + case "iPhone11,8": + return "iPhone XR" + case "iPhone12,1": + return "iPhone 11" + case "iPhone12,3": + return "iPhone 11 Pro" + case "iPhone12,5": + return "iPhone 11 Pro Max" + case "iPhone12,8": + return "iPhone SE (2nd generation)" + case "iPhone13,1": + return "iPhone 12 mini" + case "iPhone13,2": + return "iPhone 12" + case "iPhone13,3": + return "iPhone 12 Pro" + case "iPhone13,4": + return "iPhone 12 Pro Max" + case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4": + return "iPad 2" + case "iPad3,1", "iPad3,2", "iPad3,3": + return "iPad (3rd generation)" + case "iPad3,4", "iPad3,5", "iPad3,6": + return "iPad (4th generation)" + case "iPad6,11", "iPad6,12": + return "iPad (5th generation)" + case "iPad7,5", "iPad7,6": + return "iPad (6th generation)" + case "iPad7,11", "iPad7,12": + return "iPad (7th generation)" + case "iPad11,6", "iPad11,7": + return "iPad (8th generation)" + case "iPad4,1", "iPad4,2", "iPad4,3": + return "iPad Air" + case "iPad5,3", "iPad5,4": + return "iPad Air 2" + case "iPad11,3", "iPad11,4": + return "iPad Air (3rd generation)" + case "iPad13,1", "iPad13,2": + return "iPad Air (4th generation)" + case "iPad2,5", "iPad2,6", "iPad2,7": + return "iPad mini" + case "iPad4,4", "iPad4,5", "iPad4,6": + return "iPad mini 2" + case "iPad4,7", "iPad4,8", "iPad4,9": + return "iPad mini 3" + case "iPad5,1", "iPad5,2": + return "iPad mini 4" + case "iPad11,1", "iPad11,2": + return "iPad mini (5th generation)" + case "iPad6,3", "iPad6,4": + return "iPad Pro (9.7-inch)" + case "iPad7,3", "iPad7,4": + return "iPad Pro (10.5-inch)" + case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4": + return "iPad Pro (11-inch) (1st generation)" + case "iPad8,9", "iPad8,10": + return "iPad Pro (11-inch) (2nd generation)" + case "iPad6,7", "iPad6,8": + return "iPad Pro (12.9-inch) (1st generation)" + case "iPad7,1", "iPad7,2": + return "iPad Pro (12.9-inch) (2nd generation)" + case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8": + return "iPad Pro 
(12.9-inch) (3rd generation)" + case "iPad8,11", "iPad8,12": + return "iPad Pro (12.9-inch) (4th generation)" + case "AppleTV5,3": + return "Apple TV" + case "AppleTV6,2": + return "Apple TV 4K" + case "AudioAccessory1,1": + return "HomePod" + case "AudioAccessory5,1": + return "HomePod mini" + case "i386", "x86_64": + return "Simulator" + default: + return identifier + } +} + +func GetIOSDeviceType(identifier string) string { + if strings.Contains(identifier, "iPhone") { + return "mobile" //"phone" + } + if strings.Contains(identifier, "iPad") { + return "tablet" + } + return "other" +} diff --git a/backend/services/http/ios_device.go b/backend/services/http/ios_device.go deleted file mode 100644 index 2c3474157..000000000 --- a/backend/services/http/ios_device.go +++ /dev/null @@ -1,79 +0,0 @@ -package main - -import ( - "strings" -) - -func MapIOSDevice(identifier string) string { - switch identifier { - case "iPod5,1": return "iPod touch (5th generation)" - case "iPod7,1": return "iPod touch (6th generation)" - case "iPod9,1": return "iPod touch (7th generation)" - case "iPhone3,1", "iPhone3,2", "iPhone3,3": return "iPhone 4" - case "iPhone4,1": return "iPhone 4s" - case "iPhone5,1", "iPhone5,2": return "iPhone 5" - case "iPhone5,3", "iPhone5,4": return "iPhone 5c" - case "iPhone6,1", "iPhone6,2": return "iPhone 5s" - case "iPhone7,2": return "iPhone 6" - case "iPhone7,1": return "iPhone 6 Plus" - case "iPhone8,1": return "iPhone 6s" - case "iPhone8,2": return "iPhone 6s Plus" - case "iPhone8,4": return "iPhone SE" - case "iPhone9,1", "iPhone9,3": return "iPhone 7" - case "iPhone9,2", "iPhone9,4": return "iPhone 7 Plus" - case "iPhone10,1", "iPhone10,4": return "iPhone 8" - case "iPhone10,2", "iPhone10,5": return "iPhone 8 Plus" - case "iPhone10,3", "iPhone10,6": return "iPhone X" - case "iPhone11,2": return "iPhone XS" - case "iPhone11,4", "iPhone11,6": return "iPhone XS Max" - case "iPhone11,8": return "iPhone XR" - case "iPhone12,1": return "iPhone 11" - case "iPhone12,3": return "iPhone 11 Pro" - case "iPhone12,5": return "iPhone 11 Pro Max" - case "iPhone12,8": return "iPhone SE (2nd generation)" - case "iPhone13,1": return "iPhone 12 mini" - case "iPhone13,2": return "iPhone 12" - case "iPhone13,3": return "iPhone 12 Pro" - case "iPhone13,4": return "iPhone 12 Pro Max" - case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":return "iPad 2" - case "iPad3,1", "iPad3,2", "iPad3,3": return "iPad (3rd generation)" - case "iPad3,4", "iPad3,5", "iPad3,6": return "iPad (4th generation)" - case "iPad6,11", "iPad6,12": return "iPad (5th generation)" - case "iPad7,5", "iPad7,6": return "iPad (6th generation)" - case "iPad7,11", "iPad7,12": return "iPad (7th generation)" - case "iPad11,6", "iPad11,7": return "iPad (8th generation)" - case "iPad4,1", "iPad4,2", "iPad4,3": return "iPad Air" - case "iPad5,3", "iPad5,4": return "iPad Air 2" - case "iPad11,3", "iPad11,4": return "iPad Air (3rd generation)" - case "iPad13,1", "iPad13,2": return "iPad Air (4th generation)" - case "iPad2,5", "iPad2,6", "iPad2,7": return "iPad mini" - case "iPad4,4", "iPad4,5", "iPad4,6": return "iPad mini 2" - case "iPad4,7", "iPad4,8", "iPad4,9": return "iPad mini 3" - case "iPad5,1", "iPad5,2": return "iPad mini 4" - case "iPad11,1", "iPad11,2": return "iPad mini (5th generation)" - case "iPad6,3", "iPad6,4": return "iPad Pro (9.7-inch)" - case "iPad7,3", "iPad7,4": return "iPad Pro (10.5-inch)" - case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4":return "iPad Pro (11-inch) (1st generation)" - case "iPad8,9", "iPad8,10": 
return "iPad Pro (11-inch) (2nd generation)" - case "iPad6,7", "iPad6,8": return "iPad Pro (12.9-inch) (1st generation)" - case "iPad7,1", "iPad7,2": return "iPad Pro (12.9-inch) (2nd generation)" - case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8":return "iPad Pro (12.9-inch) (3rd generation)" - case "iPad8,11", "iPad8,12": return "iPad Pro (12.9-inch) (4th generation)" - case "AppleTV5,3": return "Apple TV" - case "AppleTV6,2": return "Apple TV 4K" - case "AudioAccessory1,1": return "HomePod" - case "AudioAccessory5,1": return "HomePod mini" - case "i386", "x86_64": return "Simulator" - default: return identifier - } -} - -func GetIOSDeviceType(identifier string) string { - if strings.Contains(identifier, "iPhone") { - return "mobile" //"phone" - } - if strings.Contains(identifier, "iPad") { - return "tablet" - } - return "other" -} \ No newline at end of file diff --git a/backend/services/http/main.go b/backend/services/http/main.go index 8ed8b6d95..f0ae22d32 100644 --- a/backend/services/http/main.go +++ b/backend/services/http/main.go @@ -10,19 +10,17 @@ import ( "golang.org/x/net/http2" - + "openreplay/backend/pkg/db/cache" + "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/env" "openreplay/backend/pkg/flakeid" "openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue/types" "openreplay/backend/pkg/storage" - "openreplay/backend/pkg/db/postgres" - "openreplay/backend/pkg/db/cache" - "openreplay/backend/pkg/url/assets" "openreplay/backend/pkg/token" + "openreplay/backend/pkg/url/assets" "openreplay/backend/services/http/geoip" "openreplay/backend/services/http/uaparser" - ) var rewriter *assets.Rewriter @@ -38,6 +36,7 @@ var TOPIC_RAW_WEB string var TOPIC_RAW_IOS string var TOPIC_CACHE string var TOPIC_TRIGGER string + //var TOPIC_ANALYTICS string var CACHE_ASSESTS bool var BEACON_SIZE_LIMIT int64 @@ -53,7 +52,7 @@ func main() { TOPIC_TRIGGER = env.String("TOPIC_TRIGGER") //TOPIC_ANALYTICS = env.String("TOPIC_ANALYTICS") rewriter = assets.NewRewriter(env.String("ASSETS_ORIGIN")) - pgconn = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000 * 60 * 20) + pgconn = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000*60*20) defer pgconn.Close() s3 = storage.NewS3(env.String("AWS_REGION"), env.String("S3_BUCKET_IOS_IMAGES")) tokenizer = token.NewTokenizer(env.String("TOKEN_SECRET")) @@ -70,7 +69,7 @@ func main() { Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // TODO: agree with specification - w.Header().Set("Access-Control-Allow-Origin", "*") + w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "POST") w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization") if r.Method == http.MethodOptions { @@ -79,13 +78,12 @@ func main() { return } - log.Printf("Request: %v - %v ", r.Method, r.URL.Path) - + log.Printf("Request: %v - %v ", r.Method, r.URL.Path) switch r.URL.Path { case "/": w.WriteHeader(http.StatusOK) - case "/v1/web/not-started": + case "/v1/web/not-started": switch r.Method { case http.MethodPost: notStartedHandlerWeb(w, r) diff --git a/backend/services/http/project_id.go b/backend/services/http/project_id.go deleted file mode 100644 index 059576fe8..000000000 --- a/backend/services/http/project_id.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -func decodeProjectID(projectID uint64) uint64 { - if projectID < 0x10000000000000 || projectID >= 0x20000000000000 { - return 0 - } - projectID = (projectID - 0x10000000000000) * 
4212451012670231 & 0xfffffffffffff - if projectID > 0xffffffff { - return 0 - } - return projectID -} From 6196e79d000f69a50b0b5e2e80d7298922b8e2a0 Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Fri, 1 Apr 2022 19:16:24 +0200 Subject: [PATCH 10/15] feat(backend): everyday update of an assets' version --- backend/pkg/url/assets/url.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/backend/pkg/url/assets/url.go b/backend/pkg/url/assets/url.go index 1fe717531..b55921149 100644 --- a/backend/pkg/url/assets/url.go +++ b/backend/pkg/url/assets/url.go @@ -5,11 +5,18 @@ import ( "path/filepath" "strconv" "strings" + "time" + + "openreplay/backend/pkg/flakeid" ) func getSessionKey(sessionID uint64) string { - // Based on timestamp, changes once per week. Check pkg/flakeid for understanding sessionID - return strconv.FormatUint(sessionID>>50, 10) + return strconv.FormatUint( + uint64(time.UnixMilli( + int64(flakeid.ExtractTimestamp(sessionID)), + ).Weekday()), + 10, + ) } func ResolveURL(baseurl string, rawurl string) string { From c8872064ec3c35f455c4d435a3a9e3826e19d34d Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Fri, 1 Apr 2022 19:43:46 +0200 Subject: [PATCH 11/15] code(backend): use 1.18 time features --- backend/pkg/token/tokenizer.go | 12 +- backend/pkg/utime/utime.go | 11 -- backend/services/ender/builder/builder.go | 4 +- backend/services/ender/main.go | 2 +- backend/services/http/handlers-ios.go | 4 +- backend/services/http/handlers-web.go | 8 +- .../integrations/integration/bugsnag.go | 32 +++--- .../integrations/integration/client.go | 37 +++--- .../integrations/integration/datadog.go | 43 ++++--- .../integrations/integration/elasticsearch.go | 3 +- .../integrations/integration/newrelic.go | 32 +++--- .../integrations/integration/sentry.go | 49 ++++---- .../integrations/integration/stackdriver.go | 105 +++++++++--------- .../integrations/integration/sumologic.go | 53 +++++---- ee/backend/pkg/kafka/consumer.go | 2 +- 15 files changed, 184 insertions(+), 213 deletions(-) delete mode 100644 backend/pkg/utime/utime.go diff --git a/backend/pkg/token/tokenizer.go b/backend/pkg/token/tokenizer.go index 3f1069a63..f61e1f145 100644 --- a/backend/pkg/token/tokenizer.go +++ b/backend/pkg/token/tokenizer.go @@ -22,8 +22,8 @@ func NewTokenizer(secret string) *Tokenizer { } type TokenData struct { - ID uint64 - ExpTime int64 + ID uint64 + ExpTime int64 } func (tokenizer *Tokenizer) sign(body string) []byte { @@ -33,7 +33,7 @@ func (tokenizer *Tokenizer) sign(body string) []byte { } func (tokenizer *Tokenizer) Compose(d TokenData) string { - body := strconv.FormatUint(d.ID, 36) + + body := strconv.FormatUint(d.ID, 36) + "." + strconv.FormatInt(d.ExpTime, 36) sign := base58.Encode(tokenizer.sign(body)) return body + "." 
+ sign @@ -58,8 +58,8 @@ func (tokenizer *Tokenizer) Parse(token string) (*TokenData, error) { if err != nil { return nil, err } - if expTime <= time.Now().UnixNano()/1e6 { - return &TokenData{id,expTime}, EXPIRED + if expTime <= time.Now().UnixMilli() { + return &TokenData{id, expTime}, EXPIRED } - return &TokenData{id,expTime}, nil + return &TokenData{id, expTime}, nil } diff --git a/backend/pkg/utime/utime.go b/backend/pkg/utime/utime.go deleted file mode 100644 index e3b5a2751..000000000 --- a/backend/pkg/utime/utime.go +++ /dev/null @@ -1,11 +0,0 @@ -package utime - -import "time" - -func CurrentTimestamp() int64 { - return time.Now().UnixNano() / 1e6 -} - -func ToMilliseconds(t time.Time) int64 { - return t.UnixNano() / 1e6 -} diff --git a/backend/services/ender/builder/builder.go b/backend/services/ender/builder/builder.go index e36bdcbe3..9c2067985 100644 --- a/backend/services/ender/builder/builder.go +++ b/backend/services/ender/builder/builder.go @@ -110,11 +110,11 @@ func (b *builder) buildInputEvent() { func (b *builder) handleMessage(message Message, messageID uint64) { timestamp := GetTimestamp(message) - if b.timestamp <= timestamp { // unnecessary? TODO: test and remove + if b.timestamp < timestamp { // unnecessary? TODO: test and remove b.timestamp = timestamp } - b.lastProcessedTimestamp = time.Now().UnixNano() / 1e6 + b.lastProcessedTimestamp = time.Now().UnixMilli() // Might happen before the first timestamp. switch msg := message.(type) { diff --git a/backend/services/ender/main.go b/backend/services/ender/main.go index f0f139dce..f2430f3a0 100644 --- a/backend/services/ender/main.go +++ b/backend/services/ender/main.go @@ -56,7 +56,7 @@ func main() { consumer.Close() os.Exit(0) case <-tick: - builderMap.IterateReadyMessages(time.Now().UnixNano()/1e6, func(sessionID uint64, readyMsg messages.Message) { + builderMap.IterateReadyMessages(time.Now().UnixMilli(), func(sessionID uint64, readyMsg messages.Message) { producer.Produce(TOPIC_TRIGGER, sessionID, messages.Encode(readyMsg)) }) // TODO: why exactly do we need Flush here and not in any other place? 
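The `getSessionKey` rewrite in patch 10 above swaps the coarse `sessionID>>50` bucket, which rolled over roughly once per week, for the weekday of the millisecond timestamp embedded in the flake session ID, so cached asset versions now refresh daily. A minimal runnable sketch of the idea follows; `extractTimestamp` is a hypothetical stand-in for `flakeid.ExtractTimestamp`, since the patch does not show the real bit layout (the 14-bit shift is assumed for illustration only):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// extractTimestamp stands in for flakeid.ExtractTimestamp. The 14-bit shift
// is a made-up layout for this demo; the real decoding lives in
// openreplay/backend/pkg/flakeid.
func extractTimestamp(sessionID uint64) uint64 {
	return sessionID >> 14
}

// getSessionKey mirrors the patched version: the key is the weekday
// (0 = Sunday ... 6 = Saturday) of the session's start time, so any asset
// URL version derived from it changes once per day.
func getSessionKey(sessionID uint64) string {
	return strconv.FormatUint(
		uint64(time.UnixMilli(int64(extractTimestamp(sessionID))).Weekday()),
		10,
	)
}

func main() {
	id := uint64(time.Now().UnixMilli()) << 14 // fabricate an ID for the demo
	fmt.Println(getSessionKey(id))             // prints 0..6 depending on the day
}
```

Note that the key cycles through only seven values, so a key produced on one Monday repeats the next Monday; for busting the cache of rewritten asset URLs that appears to be the intended trade-off.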
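Patch 11, continued below, is almost entirely one mechanical substitution applied across the HTTP handlers and integration clients: the hand-rolled millisecond conversions from the now-deleted `pkg/utime` become the `UnixMilli` helpers, which actually landed in Go 1.17 (the commit subject says 1.18). A small sketch checking that the old and new forms agree:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()

	// Old form, as deleted from pkg/utime: truncate nanoseconds to milliseconds.
	oldMs := now.UnixNano() / 1e6

	// New form: the stdlib helper (Go 1.17+).
	newMs := now.UnixMilli()
	fmt.Println(oldMs == newMs) // always true

	// The reverse direction replaces time.Unix(0, ts*1e6):
	fmt.Println(time.UnixMilli(newMs).Equal(time.Unix(0, newMs*1e6))) // true
}
```

The same pattern covers parsing (`time.UnixMilli(int64(ts))` instead of `time.Unix(0, int64(ts*1e6))`) and durations (`d.Milliseconds()` instead of `d.Nanoseconds()/1e6`), which is exactly what the diffs below do in the bugsnag, datadog, elasticsearch, newrelic, sentry, stackdriver and sumologic clients and in the Kafka consumer.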
diff --git a/backend/services/http/handlers-ios.go b/backend/services/http/handlers-ios.go index affcab59d..f15a6af60 100644 --- a/backend/services/http/handlers-ios.go +++ b/backend/services/http/handlers-ios.go @@ -85,14 +85,14 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) { responseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) return } - sessionID, err := flaker.Compose(uint64(startTime.UnixNano() / 1e6)) + sessionID, err := flaker.Compose(uint64(startTime.UnixMilli())) if err != nil { responseWithError(w, http.StatusInternalServerError, err) return } // TODO: if EXPIRED => send message for two sessions association expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond) - tokenData = &token.TokenData{sessionID, expTime.UnixNano() / 1e6} + tokenData = &token.TokenData{sessionID, expTime.UnixMilli()} country := geoIP.ExtractISOCodeFromHTTPRequest(r) diff --git a/backend/services/http/handlers-web.go b/backend/services/http/handlers-web.go index 6020c3eb1..29dcf161d 100644 --- a/backend/services/http/handlers-web.go +++ b/backend/services/http/handlers-web.go @@ -76,14 +76,14 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { responseWithError(w, http.StatusForbidden, errors.New("browser not recognized")) return } - sessionID, err := flaker.Compose(uint64(startTime.UnixNano() / 1e6)) + sessionID, err := flaker.Compose(uint64(startTime.UnixMilli())) if err != nil { responseWithError(w, http.StatusInternalServerError, err) return } // TODO: if EXPIRED => send message for two sessions association expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond) - tokenData = &token.TokenData{sessionID, expTime.UnixNano() / 1e6} + tokenData = &token.TokenData{sessionID, expTime.UnixMilli()} country := geoIP.ExtractISOCodeFromHTTPRequest(r) producer.Produce(TOPIC_RAW_WEB, tokenData.ID, Encode(&SessionStart{ @@ -108,8 +108,8 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { //delayDuration := time.Now().Sub(startTime) responseWithJSON(w, &response{ - //Timestamp: startTime.UnixNano() / 1e6, - //Delay: delayDuration.Nanoseconds() / 1e6, + //Timestamp: startTime.UnixMilli(), + //Delay: delayDuration.Milliseconds(), Token: tokenizer.Compose(*tokenData), UserUUID: userUUID, SessionID: strconv.FormatUint(tokenData.ID, 10), diff --git a/backend/services/integrations/integration/bugsnag.go b/backend/services/integrations/integration/bugsnag.go index 7c31db3cb..118cdb84d 100644 --- a/backend/services/integrations/integration/bugsnag.go +++ b/backend/services/integrations/integration/bugsnag.go @@ -1,15 +1,14 @@ package integration import ( + "encoding/json" "fmt" + "io" + "io/ioutil" "net/http" - "encoding/json" "net/url" "time" - "io" - "io/ioutil" - "openreplay/backend/pkg/utime" "openreplay/backend/pkg/messages" ) @@ -18,15 +17,14 @@ import ( */ type bugsnag struct { - BugsnagProjectId string // `json:"bugsnag_project_id"` + BugsnagProjectId string // `json:"bugsnag_project_id"` AuthorizationToken string // `json:"auth_token"` } - type bugsnagEvent struct { MetaData struct { SpecialInfo struct { - AsayerSessionId uint64 `json:"asayerSessionId,string"` + AsayerSessionId uint64 `json:"asayerSessionId,string"` OpenReplaySessionToken string `json:"openReplaySessionToken"` } `json:"special_info"` } `json:"metaData"` @@ -38,7 +36,7 @@ type bugsnagEvent struct { func (b *bugsnag) Request(c *client) error { sinceTs := c.getLastMessageTimestamp() + 1000 // From next second - 
sinceFormatted := time.Unix(0, int64(sinceTs*1e6)).Format(time.RFC3339) + sinceFormatted := time.UnixMilli(int64(sinceTs)).Format(time.RFC3339) requestURL := fmt.Sprintf("https://api.bugsnag.com/projects/%v/events", b.BugsnagProjectId) req, err := http.NewRequest("GET", requestURL, nil) if err != nil { @@ -47,10 +45,10 @@ func (b *bugsnag) Request(c *client) error { q := req.URL.Query() // q.Add("per_page", "100") // Up to a maximum of 30. Default: 30 // q.Add("sort", "timestamp") // Default: timestamp (timestamp == ReceivedAt ??) - q.Add("direction", "asc") // Default: desc + q.Add("direction", "asc") // Default: desc q.Add("full_reports", "true") // Default: false - q.Add("filters[event.since][][type]", "eq") - q.Add("filters[event.since][][value]", sinceFormatted) // seems like inclusively + q.Add("filters[event.since][][type]", "eq") + q.Add("filters[event.since][][value]", sinceFormatted) // seems like inclusively req.URL.RawQuery = q.Encode() authToken := "token " + b.AuthorizationToken @@ -85,7 +83,7 @@ func (b *bugsnag) Request(c *client) error { } sessionID := e.MetaData.SpecialInfo.AsayerSessionId token := e.MetaData.SpecialInfo.OpenReplaySessionToken - if sessionID == 0 && token == "" { + if sessionID == 0 && token == "" { // c.errChan <- "No AsayerSessionId found. | Message: %v", e continue } @@ -94,16 +92,16 @@ func (b *bugsnag) Request(c *client) error { c.errChan <- err continue } - timestamp := uint64(utime.ToMilliseconds(parsedTime)) + timestamp := uint64(parsedTime.UnixMilli()) c.setLastMessageTimestamp(timestamp) c.evChan <- &SessionErrorEvent{ SessionID: sessionID, - Token: token, + Token: token, RawErrorEvent: &messages.RawErrorEvent{ - Source: "bugsnag", + Source: "bugsnag", Timestamp: timestamp, - Name: e.Exceptions[0].Message, - Payload: string(jsonEvent), + Name: e.Exceptions[0].Message, + Payload: string(jsonEvent), }, } } diff --git a/backend/services/integrations/integration/client.go b/backend/services/integrations/integration/client.go index 2abf9913d..315bfe4e9 100644 --- a/backend/services/integrations/integration/client.go +++ b/backend/services/integrations/integration/client.go @@ -5,10 +5,10 @@ import ( "fmt" "log" "sync" + "time" "openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/utime" ) const MAX_ATTEMPTS_IN_A_ROW = 4 @@ -20,10 +20,10 @@ type requester interface { } type requestData struct { - LastMessageTimestamp uint64 // `json:"lastMessageTimestamp, string"` - LastMessageId string + LastMessageTimestamp uint64 // `json:"lastMessageTimestamp, string"` + LastMessageId string UnsuccessfullAttemptsCount int - LastAttemptTimestamp int64 + LastAttemptTimestamp int64 } type client struct { @@ -31,19 +31,19 @@ type client struct { requester integration *postgres.Integration // TODO: timeout ? - mux sync.Mutex + mux sync.Mutex updateChan chan<- postgres.Integration - evChan chan<- *SessionErrorEvent - errChan chan<- error + evChan chan<- *SessionErrorEvent + errChan chan<- error } type SessionErrorEvent struct { SessionID uint64 - Token string + Token string *messages.RawErrorEvent } -type ClientMap map[ string ]*client +type ClientMap map[string]*client func NewClient(i *postgres.Integration, updateChan chan<- postgres.Integration, evChan chan<- *SessionErrorEvent, errChan chan<- error) (*client, error) { c := new(client) @@ -60,15 +60,14 @@ func NewClient(i *postgres.Integration, updateChan chan<- postgres.Integration, // TODO: RequestData manager if c.requestData.LastMessageTimestamp == 0 { // ? 
- c.requestData.LastMessageTimestamp = uint64(utime.CurrentTimestamp() - 24*60*60*1000) + c.requestData.LastMessageTimestamp = uint64(time.Now().Add(-time.Hour * 24).UnixMilli()) } return c, nil } - // from outside -func (c* client) Update(i *postgres.Integration) error { +func (c *client) Update(i *postgres.Integration) error { c.mux.Lock() defer c.mux.Unlock() var r requester @@ -111,8 +110,8 @@ func (c *client) getLastMessageTimestamp() uint64 { } func (c *client) setLastMessageId(timestamp uint64, id string) { //if timestamp >= c.requestData.LastMessageTimestamp { - c.requestData.LastMessageId = id - c.requestData.LastMessageTimestamp = timestamp + c.requestData.LastMessageId = id + c.requestData.LastMessageTimestamp = timestamp //} } func (c *client) getLastMessageId() string { @@ -128,18 +127,18 @@ func (c *client) Request() { c.mux.Lock() defer c.mux.Unlock() if c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS || - (c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS_IN_A_ROW && - utime.CurrentTimestamp() - c.requestData.LastAttemptTimestamp < ATTEMPTS_INTERVAL) { + (c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS_IN_A_ROW && + time.Now().UnixMilli()-c.requestData.LastAttemptTimestamp < ATTEMPTS_INTERVAL) { return } - c.requestData.LastAttemptTimestamp = utime.CurrentTimestamp() + c.requestData.LastAttemptTimestamp = time.Now().UnixMilli() err := c.requester.Request(c) if err != nil { log.Println("ERRROR L139") log.Println(err) c.handleError(err) - c.requestData.UnsuccessfullAttemptsCount++; + c.requestData.UnsuccessfullAttemptsCount++ } else { c.requestData.UnsuccessfullAttemptsCount = 0 } @@ -152,5 +151,3 @@ func (c *client) Request() { c.integration.RequestData = rd c.updateChan <- *c.integration } - - diff --git a/backend/services/integrations/integration/datadog.go b/backend/services/integrations/integration/datadog.go index eb7b5daee..096c3b822 100644 --- a/backend/services/integrations/integration/datadog.go +++ b/backend/services/integrations/integration/datadog.go @@ -1,38 +1,37 @@ package integration import ( - "fmt" - "net/http" - "encoding/json" "bytes" - "time" + "encoding/json" + "fmt" "io" - "io/ioutil" + "io/ioutil" + "net/http" + "time" - "openreplay/backend/pkg/utime" "openreplay/backend/pkg/messages" ) -/* +/* We collect Logs. Datadog also has Events */ type datadog struct { - ApplicationKey string //`json:"application_key"` - ApiKey string //`json:"api_key"` + ApplicationKey string //`json:"application_key"` + ApiKey string //`json:"api_key"` } type datadogResponce struct { - Logs []json.RawMessage + Logs []json.RawMessage NextLogId *string - Status string + Status string } type datadogLog struct { Content struct { - Timestamp string - Message string + Timestamp string + Message string Attributes struct { Error struct { // Not sure about this Message string @@ -48,10 +47,10 @@ func (d *datadog) makeRequest(nextLogId *string, fromTs uint64, toTs uint64) (*h d.ApplicationKey, ) startAt := "null" - if nextLogId != nil && *nextLogId != "" { + if nextLogId != nil && *nextLogId != "" { startAt = *nextLogId } - // Query: status:error/info/warning? + // Query: status:error/info/warning? 
// openReplaySessionToken instead of asayer_session_id jsonBody := fmt.Sprintf(`{ "limit": 1000, @@ -72,8 +71,8 @@ func (d *datadog) makeRequest(nextLogId *string, fromTs uint64, toTs uint64) (*h } func (d *datadog) Request(c *client) error { - fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond - toTs := uint64(utime.CurrentTimestamp()) + fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond + toTs := uint64(time.Now().UnixMilli()) var nextLogId *string for { req, err := d.makeRequest(nextLogId, fromTs, toTs) @@ -111,16 +110,16 @@ func (d *datadog) Request(c *client) error { c.errChan <- err continue } - timestamp := uint64(utime.ToMilliseconds(parsedTime)) + timestamp := uint64(parsedTime.UnixMilli()) c.setLastMessageTimestamp(timestamp) c.evChan <- &SessionErrorEvent{ //SessionID: sessionID, Token: token, RawErrorEvent: &messages.RawErrorEvent{ - Source: "datadog", + Source: "datadog", Timestamp: timestamp, - Name: ddLog.Content.Attributes.Error.Message, - Payload: string(jsonLog), + Name: ddLog.Content.Attributes.Error.Message, + Payload: string(jsonLog), }, } } @@ -129,4 +128,4 @@ func (d *datadog) Request(c *client) error { return nil } } -} \ No newline at end of file +} diff --git a/backend/services/integrations/integration/elasticsearch.go b/backend/services/integrations/integration/elasticsearch.go index 14480e0b8..dd6f5d5f9 100644 --- a/backend/services/integrations/integration/elasticsearch.go +++ b/backend/services/integrations/integration/elasticsearch.go @@ -12,7 +12,6 @@ import ( "time" "openreplay/backend/pkg/messages" - "openreplay/backend/pkg/utime" ) type elasticsearch struct { @@ -164,7 +163,7 @@ func (es *elasticsearch) Request(c *client) error { c.errChan <- err continue } - timestamp := uint64(utime.ToMilliseconds(esLog.Time)) + timestamp := uint64(esLog.Time.UnixMilli()) c.setLastMessageTimestamp(timestamp) var sessionID uint64 diff --git a/backend/services/integrations/integration/newrelic.go b/backend/services/integrations/integration/newrelic.go index 937ab166d..2dce79aa5 100644 --- a/backend/services/integrations/integration/newrelic.go +++ b/backend/services/integrations/integration/newrelic.go @@ -2,25 +2,24 @@ package integration import ( "encoding/json" - "time" + "errors" "fmt" - "net/http" "io" - "io/ioutil" - "errors" + "io/ioutil" + "net/http" + "time" "openreplay/backend/pkg/messages" ) /* - We use insights-api for query. They also have Logs and Events + We use insights-api for query. They also have Logs and Events */ - // TODO: Eu/us type newrelic struct { - ApplicationId string //`json:"application_id"` - XQueryKey string //`json:"x_query_key"` + ApplicationId string //`json:"application_id"` + XQueryKey string //`json:"x_query_key"` } // TODO: Recheck @@ -34,14 +33,14 @@ type newrelicResponce struct { type newrelicEvent struct { //AsayerSessionID uint64 `json:"asayer_session_id,string"` // string/int decoder? OpenReplaySessionToken string `json:"openReplaySessionToken"` - ErrorClass string `json:"error.class"` - Timestamp uint64 `json:"timestamp"` + ErrorClass string `json:"error.class"` + Timestamp uint64 `json:"timestamp"` } func (nr *newrelic) Request(c *client) error { sinceTs := c.getLastMessageTimestamp() + 1000 // From next second // In docs - format "yyyy-mm-dd HH:MM:ss", but time.RFC3339 works fine too - sinceFormatted := time.Unix(0, int64(sinceTs*1e6)).Format(time.RFC3339) + sinceFormatted := time.UnixMilli(int64(sinceTs)).Format(time.RFC3339) // US/EU endpoint ?? 
requestURL := fmt.Sprintf("https://insights-api.eu.newrelic.com/v1/accounts/%v/query", nr.ApplicationId) req, err := http.NewRequest("GET", requestURL, nil) @@ -64,11 +63,10 @@ func (nr *newrelic) Request(c *client) error { } defer resp.Body.Close() - // 401 (unauthorised) if wrong XQueryKey/deploymentServer is wrong or 403 (Forbidden) if ApplicationId is wrong // 400 if Query has problems if resp.StatusCode >= 400 { - io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket + io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket return fmt.Errorf("Newrelic: server respond with the code %v| Request: ", resp.StatusCode, *req) } // Pagination depending on returning metadata ? @@ -92,13 +90,13 @@ func (nr *newrelic) Request(c *client) error { c.evChan <- &SessionErrorEvent{ Token: e.OpenReplaySessionToken, RawErrorEvent: &messages.RawErrorEvent{ - Source: "newrelic", + Source: "newrelic", Timestamp: e.Timestamp, - Name: e.ErrorClass, - Payload: string(jsonEvent), + Name: e.ErrorClass, + Payload: string(jsonEvent), }, } } } return nil -} \ No newline at end of file +} diff --git a/backend/services/integrations/integration/sentry.go b/backend/services/integrations/integration/sentry.go index 0330430c3..1c5bfdaad 100644 --- a/backend/services/integrations/integration/sentry.go +++ b/backend/services/integrations/integration/sentry.go @@ -1,44 +1,41 @@ package integration import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" "net/http" "net/url" - "encoding/json" - "strings" - "fmt" - "time" "strconv" - "io" - "io/ioutil" + "strings" + "time" - "openreplay/backend/pkg/utime" "openreplay/backend/pkg/messages" ) - -/* +/* They also have different stuff - Documentation says: + Documentation says: "Note: This endpoint is experimental and may be removed without notice." */ type sentry struct { OrganizationSlug string // `json:"organization_slug"` - ProjectSlug string // `json:"project_slug"` - Token string // `json:"token"` + ProjectSlug string // `json:"project_slug"` + Token string // `json:"token"` } type sentryEvent struct { Tags []struct { - Key string - Value string `json:"value"` + Key string + Value string `json:"value"` } - DateCreated string `json:"dateCreated"` // or dateReceived ? - Title string - EventID string `json:"eventID"` + DateCreated string `json:"dateCreated"` // or dateReceived ? + Title string + EventID string `json:"eventID"` } - func (sn *sentry) Request(c *client) error { requestURL := fmt.Sprintf("https://sentry.io/api/0/projects/%v/%v/events/", sn.OrganizationSlug, sn.ProjectSlug) req, err := http.NewRequest("GET", requestURL, nil) @@ -88,9 +85,9 @@ PageLoop: c.errChan <- fmt.Errorf("%v | Event: %v", err, e) continue } - timestamp := uint64(utime.ToMilliseconds(parsedTime)) + timestamp := uint64(parsedTime.UnixMilli()) // TODO: not to receive all the messages (use default integration timestamp) - if firstEvent { // TODO: reverse range? + if firstEvent { // TODO: reverse range? c.setLastMessageId(timestamp, e.EventID) firstEvent = false } @@ -117,12 +114,12 @@ PageLoop: c.evChan <- &SessionErrorEvent{ SessionID: sessionID, - Token: token, + Token: token, RawErrorEvent: &messages.RawErrorEvent{ - Source: "sentry", + Source: "sentry", Timestamp: timestamp, - Name: e.Title, - Payload: string(jsonEvent), + Name: e.Title, + Payload: string(jsonEvent), }, } } @@ -137,7 +134,7 @@ PageLoop: return fmt.Errorf("Link header format error. 
Got: '%v'", linkHeader) } - nextLinkInfo := pagInfo[ 1 ] + nextLinkInfo := pagInfo[1] if strings.Contains(nextLinkInfo, `results="false"`) { break } @@ -151,4 +148,4 @@ PageLoop: } } return nil -} \ No newline at end of file +} diff --git a/backend/services/integrations/integration/stackdriver.go b/backend/services/integrations/integration/stackdriver.go index bb8e3cef9..e852d5d36 100644 --- a/backend/services/integrations/integration/stackdriver.go +++ b/backend/services/integrations/integration/stackdriver.go @@ -1,22 +1,19 @@ package integration - import ( - "google.golang.org/api/option" "cloud.google.com/go/logging/logadmin" "google.golang.org/api/iterator" - - //"strconv" - "encoding/json" - "time" - "fmt" - "context" + "google.golang.org/api/option" + + //"strconv" + "context" + "encoding/json" + "fmt" + "time" - "openreplay/backend/pkg/utime" "openreplay/backend/pkg/messages" ) - // Old: asayerSessionId const SD_FILTER_QUERY = ` @@ -28,7 +25,7 @@ const SD_FILTER_QUERY = ` type stackdriver struct { ServiceAccountCredentials string // `json:"service_account_credentials"` - LogName string // `json:"log_name"` + LogName string // `json:"log_name"` } type saCreds struct { @@ -37,10 +34,10 @@ type saCreds struct { func (sd *stackdriver) Request(c *client) error { fromTs := c.getLastMessageTimestamp() + 1 // Timestamp is RFC3339Nano, so we take the next millisecond - fromFormatted := time.Unix(0, int64(fromTs *1e6)).Format(time.RFC3339Nano) + fromFormatted := time.UnixMilli(int64(fromTs)).Format(time.RFC3339Nano) ctx := context.Background() - var parsedCreds saCreds + var parsedCreds saCreds err := json.Unmarshal([]byte(sd.ServiceAccountCredentials), &parsedCreds) if err != nil { return err @@ -49,56 +46,56 @@ func (sd *stackdriver) Request(c *client) error { opt := option.WithCredentialsJSON([]byte(sd.ServiceAccountCredentials)) client, err := logadmin.NewClient(ctx, parsedCreds.ProjectId, opt) if err != nil { - return err + return err } defer client.Close() - - filter := fmt.Sprintf(SD_FILTER_QUERY, parsedCreds.ProjectId, sd.LogName, fromFormatted) - // By default, Entries are listed from oldest to newest. - /* ResourceNames(rns []string) - "projects/[PROJECT_ID]" - "organizations/[ORGANIZATION_ID]" - "billingAccounts/[BILLING_ACCOUNT_ID]" - "folders/[FOLDER_ID]" - */ - it := client.Entries(ctx, logadmin.Filter(filter)) - // TODO: Pagination: - //pager := iterator.NewPager(it, 1000, "") - //nextToken, err := pager.NextPage(&entries) - //if nextToken == "" { break } - for { - e, err := it.Next() - if err == iterator.Done { - break - } - if err != nil { - return err - } + filter := fmt.Sprintf(SD_FILTER_QUERY, parsedCreds.ProjectId, sd.LogName, fromFormatted) + // By default, Entries are listed from oldest to newest. 
+ /* ResourceNames(rns []string) + "projects/[PROJECT_ID]" + "organizations/[ORGANIZATION_ID]" + "billingAccounts/[BILLING_ACCOUNT_ID]" + "folders/[FOLDER_ID]" + */ + it := client.Entries(ctx, logadmin.Filter(filter)) - token := e.Labels["openReplaySessionToken"] - // sessionID, err := strconv.ParseUint(strSessionID, 10, 64) - // if err != nil { - // c.errChan <- err - // continue - // } - jsonEvent, err := json.Marshal(e) - if err != nil { - c.errChan <- err - continue - } - timestamp := uint64(utime.ToMilliseconds(e.Timestamp)) - c.setLastMessageTimestamp(timestamp) - c.evChan <- &SessionErrorEvent{ + // TODO: Pagination: + //pager := iterator.NewPager(it, 1000, "") + //nextToken, err := pager.NextPage(&entries) + //if nextToken == "" { break } + for { + e, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + + token := e.Labels["openReplaySessionToken"] + // sessionID, err := strconv.ParseUint(strSessionID, 10, 64) + // if err != nil { + // c.errChan <- err + // continue + // } + jsonEvent, err := json.Marshal(e) + if err != nil { + c.errChan <- err + continue + } + timestamp := uint64(e.Timestamp.UnixMilli()) + c.setLastMessageTimestamp(timestamp) + c.evChan <- &SessionErrorEvent{ //SessionID: sessionID, Token: token, RawErrorEvent: &messages.RawErrorEvent{ - Source: "stackdriver", + Source: "stackdriver", Timestamp: timestamp, - Name: e.InsertID, // not sure about that - Payload: string(jsonEvent), + Name: e.InsertID, // not sure about that + Payload: string(jsonEvent), }, } } return nil -} \ No newline at end of file +} diff --git a/backend/services/integrations/integration/sumologic.go b/backend/services/integrations/integration/sumologic.go index 2660dd6ac..8ff39ec9e 100644 --- a/backend/services/integrations/integration/sumologic.go +++ b/backend/services/integrations/integration/sumologic.go @@ -1,20 +1,19 @@ package integration import ( - "net/http" - "time" "encoding/json" "fmt" - "strings" "io" - "io/ioutil" + "io/ioutil" + "net/http" + "strings" + "time" - "openreplay/backend/pkg/utime" "openreplay/backend/pkg/messages" ) -/* - The maximum value for limit is 10,000 messages or 100 MB in total message size, +/* + The maximum value for limit is 10,000 messages or 100 MB in total message size, which means the query may return less than 10,000 messages if you exceed the size limit. 
API Documentation: https://help.sumologic.com/APIs/Search-Job-API/About-the-Search-Job-API @@ -22,31 +21,30 @@ import ( const SL_LIMIT = 10000 type sumologic struct { - AccessId string // `json:"access_id"` - AccessKey string // `json:"access_key"` - cookies []*http.Cookie + AccessId string // `json:"access_id"` + AccessKey string // `json:"access_key"` + cookies []*http.Cookie } - type sumplogicJobResponce struct { Id string } type sumologicJobStatusResponce struct { - State string + State string MessageCount int //PendingErrors []string } type sumologicResponce struct { - Messages [] struct { + Messages []struct { Map json.RawMessage } } type sumologicEvent struct { Timestamp uint64 `json:"_messagetime,string"` - Raw string `json:"_raw"` + Raw string `json:"_raw"` } func (sl *sumologic) deleteJob(jobId string, errChan chan<- error) { @@ -68,10 +66,9 @@ func (sl *sumologic) deleteJob(jobId string, errChan chan<- error) { resp.Body.Close() } - func (sl *sumologic) Request(c *client) error { fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond - toTs := utime.CurrentTimestamp() + toTs := time.Now().UnixMilli() requestURL := fmt.Sprintf("https://api.%vsumologic.com/api/v1/search/jobs", "eu.") // deployment server?? jsonBody := fmt.Sprintf(`{ "query": "\"openReplaySessionToken=\" AND (*error* OR *fail* OR *exception*)", @@ -132,7 +129,7 @@ func (sl *sumologic) Request(c *client) error { tick := time.Tick(5 * time.Second) for { - <- tick + <-tick resp, err = http.DefaultClient.Do(req) if err != nil { return err // TODO: retry, counter/timeout @@ -147,12 +144,12 @@ func (sl *sumologic) Request(c *client) error { } if jobStatus.State == "DONE GATHERING RESULTS" { offset := 0 - for ;offset < jobStatus.MessageCount; { + for offset < jobStatus.MessageCount { requestURL = fmt.Sprintf( - "https://api.%vsumologic.com/api/v1/search/jobs/%v/messages?offset=%v&limit=%v", - "eu.", - jobResponce.Id, - offset, + "https://api.%vsumologic.com/api/v1/search/jobs/%v/messages?offset=%v&limit=%v", + "eu.", + jobResponce.Id, + offset, SL_LIMIT, ) req, err = http.NewRequest("GET", requestURL, nil) @@ -190,17 +187,17 @@ func (sl *sumologic) Request(c *client) error { } name := e.Raw if len(name) > 20 { - name = name[:20] // not sure about that + name = name[:20] // not sure about that } c.setLastMessageTimestamp(e.Timestamp) c.evChan <- &SessionErrorEvent{ //SessionID: sessionID, Token: token, RawErrorEvent: &messages.RawErrorEvent{ - Source: "sumologic", + Source: "sumologic", Timestamp: e.Timestamp, - Name: name, - Payload: string(m.Map), //e.Raw ? + Name: name, + Payload: string(m.Map), //e.Raw ? 
}, } @@ -209,11 +206,11 @@ func (sl *sumologic) Request(c *client) error { } break } - if jobStatus.State != "NOT STARTED" && + if jobStatus.State != "NOT STARTED" && jobStatus.State != "GATHERING RESULTS" { // error break } } return nil -} \ No newline at end of file +} diff --git a/ee/backend/pkg/kafka/consumer.go b/ee/backend/pkg/kafka/consumer.go index 82aa56d50..cb3714316 100644 --- a/ee/backend/pkg/kafka/consumer.go +++ b/ee/backend/pkg/kafka/consumer.go @@ -147,7 +147,7 @@ func (consumer *Consumer) ConsumeNext() error { if e.TopicPartition.Error != nil { return errors.Wrap(e.TopicPartition.Error, "Consumer Partition Error") } - ts := e.Timestamp.UnixNano() / 1e6 + ts := e.Timestamp.UnixMilli() consumer.messageHandler(decodeKey(e.Key), e.Value, &types.Meta{ Topic: *(e.TopicPartition.Topic), ID: uint64(e.TopicPartition.Offset), From d7710356e94505e2146528412f9517709168b2f2 Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Fri, 1 Apr 2022 22:15:51 +0200 Subject: [PATCH 12/15] feat(tracker-fetch):3.5.3: common improved sanitiser --- tracker/tracker-fetch/README.md | 123 ++++++++++++++++++++++++----- tracker/tracker-fetch/package.json | 2 +- tracker/tracker-fetch/src/index.ts | 122 +++++++++++++++------------- 3 files changed, 170 insertions(+), 77 deletions(-) diff --git a/tracker/tracker-fetch/README.md b/tracker/tracker-fetch/README.md index b7fca2e4b..53fddcf8e 100644 --- a/tracker/tracker-fetch/README.md +++ b/tracker/tracker-fetch/README.md @@ -1,7 +1,6 @@ -# OpenReplay Tracker Fetch plugin +# Fetch plugin for OpenReplay -Tracker plugin to support tracking of the `fetch` requests payload. -Additionally it populates the requests with `sessionToken` header for backend logging. +This plugin allows you to capture `fetch` payloads and inspect them later on while replaying session recordings. This is very useful for understanding and fixing issues. ## Installation @@ -11,36 +10,120 @@ npm i @openreplay/tracker-fetch ## Usage -Initialize the `@openreplay/tracker` package as usual and load the plugin into it. -Then you can use the provided `fetch` method from the plugin instead of built-in. +Use the provided `fetch` method from the plugin instead of the one built-in. + +### If your website is a Single Page Application (SPA) ```js -import Tracker from '@openreplay/tracker'; +import tracker from '@openreplay/tracker'; import trackerFetch from '@openreplay/tracker-fetch'; -const tracker = new Tracker({ - projectKey: YOUR_PROJECT_KEY, +const tracker = new OpenReplay({ + projectKey: PROJECT_KEY }); +const fetch = tracker.use(trackerFetch(options)); // check list of available options below + tracker.start(); -export const fetch = tracker.use(trackerFetch({ /* options here*/ })); - -fetch('https://my.api.io/resource').then(response => response.json()).then(body => console.log(body)); +fetch('https://myapi.com/').then(response => console.log(response.json())); ``` -Options: -```ts -{ - failuresOnly: boolean, // default false - sessionTokenHeader: string | undefined, // default undefined - ignoreHeaders: Array | boolean, // default [ 'Cookie', 'Set-Cookie', 'Authorization' ] +### If your web app is Server-Side-Rendered (SSR) + +Follow the below example if your app is SSR. Ensure `tracker.start()` is called once the app is started (in `useEffect` or `componentDidMount`). 
+
+```js
+import OpenReplay from '@openreplay/tracker/cjs';
+import trackerFetch from '@openreplay/tracker-fetch/cjs';
+
+const tracker = new OpenReplay({
+  projectKey: PROJECT_KEY
+});
+const fetch = tracker.use(trackerFetch(options)); // check the list of available options below
+
+//...
+function MyApp() {
+  useEffect(() => { // use componentDidMount in case of a React class component
+    tracker.start();
+
+    fetch('https://myapi.com/').then(response => console.log(response.json()));
+  }, [])
+//...
+}
+```
 
-Options:
-```ts
-{
-  failuresOnly: boolean, // default false
-  sessionTokenHeader: string | undefined, // default undefined
-  ignoreHeaders: Array<string> | boolean, // default [ 'Cookie', 'Set-Cookie', 'Authorization' ]
-}
-```
+## Options
 
-Set `failuresOnly` option to `true` if you want to record only requests with the status code >= 400.
+```js
+trackerFetch({
+  overrideGlobal: boolean;
+  failuresOnly: boolean;
+  sessionTokenHeader: string;
+  ignoreHeaders: Array<string> | boolean;
+  sanitiser: (data: RequestResponseData) => RequestResponseData | null;
+})
+```
 
-In case you use [OpenReplay integrations (sentry, bugsnag or others)](https://docs.openreplay.com/integrations), you can use `sessionTokenHeader` option to specify the header name. This header will be appended automatically to the each fetch request and will contain OpenReplay session identificator value.
-You can define list of headers that you don't want to capture with the `ignoreHeaders` options. Set its value to `false` if you want to catch them all (`true` if opposite). By default plugin ignores the list of headers that might be sensetive such as `[ 'Cookie', 'Set-Cookie', 'Authorization' ]`.
+- `overrideGlobal`: Overrides the global `window.fetch`. Default: `false`.
+- `failuresOnly`: Captures only requests with a 4xx-5xx HTTP status code. Default: `false`.
+- `sessionTokenHeader`: If you have enabled one of our backend [integrations](/integrations) (e.g. Sentry), use this option to specify the header name (e.g. 'X-OpenReplay-SessionToken'). The header is automatically appended to each fetch request and carries the OpenReplay session token value. Default: `undefined`.
+- `ignoreHeaders`: Defines a list of headers you don't wish to capture. Set it to `false` to capture all of them (`true` to capture none). Default: `['Cookie', 'Set-Cookie', 'Authorization']`, so sensitive headers won't be captured.
+- `sanitiser`: Sanitises sensitive data in the fetch request/response, or ignores the request completely. See the examples below.
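Putting these options together, here is a minimal configuration sketch (the header name is the `sessionTokenHeader` example from the list above, and the endpoint URL is a placeholder):

```js
import OpenReplay from '@openreplay/tracker';
import trackerFetch from '@openreplay/tracker-fetch';

const tracker = new OpenReplay({ projectKey: PROJECT_KEY });

// Capture failed requests only, tag each one with the session token header,
// and keep the default sensitive-header ignore list explicit.
const fetch = tracker.use(trackerFetch({
  failuresOnly: true,
  sessionTokenHeader: 'X-OpenReplay-SessionToken',
  ignoreHeaders: ['Cookie', 'Set-Cookie', 'Authorization'],
}));

tracker.start();

fetch('https://myapi.com/').then(response => console.log(response.json()));
```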
+For `sanitiser` specifically, you can redact fields on the request object by modifying it and then returning it from the function:
+
+```typescript
+interface RequestData {
+  body: BodyInit | null | undefined; // whatever you've put in the init.body in fetch(url, init)
+  headers: Record<string, string>;
+}
+
+interface ResponseData {
+  body: string | Object | null; // Object if the response is of JSON type
+  headers: Record<string, string>;
+}
+
+interface RequestResponseData {
+  readonly status: number;
+  readonly method: string;
+  url: string;
+  request: RequestData;
+  response: ResponseData;
+}
+
+sanitiser: (data: RequestResponseData) => { // sanitise the body or headers
+  if (data.url === "/auth") {
+    data.request.body = null
+  }
+
+  if (data.request.headers['x-auth-token']) { // can also use the ignoreHeaders option instead
+    data.request.headers['x-auth-token'] = 'SANITISED';
+  }
+
+  // Sanitise the response
+  if (data.status < 400 && data.response.body.token) {
+    data.response.body.token = ""
+  }
+
+  return data
+}
+
+// OR
+
+sanitiser: data => { // ignore requests whose URL starts with /secure
+  if (data.url.startsWith("/secure")) {
+    return null
+  }
+  return data
+}
+
+// OR
+
+sanitiser: data => { // sanitise the request URL: replace all digits
+  data.url = data.url.replace(/\d/g, "*")
+  return data
+}
+```
+
+## Troubleshooting
+
+Having trouble setting up this plugin? Please connect to our [Discord](https://discord.openreplay.com) and get help from our community.
\ No newline at end of file
diff --git a/tracker/tracker-fetch/package.json b/tracker/tracker-fetch/package.json
index 0b1373edc..c13b1a28b 100644
--- a/tracker/tracker-fetch/package.json
+++ b/tracker/tracker-fetch/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@openreplay/tracker-fetch",
   "description": "Tracker plugin for fetch requests recording ",
-  "version": "3.5.2",
+  "version": "3.5.3",
   "keywords": [
     "fetch",
     "logging",
diff --git a/tracker/tracker-fetch/src/index.ts b/tracker/tracker-fetch/src/index.ts
index 93e39cce5..922913923 100644
--- a/tracker/tracker-fetch/src/index.ts
+++ b/tracker/tracker-fetch/src/index.ts
@@ -1,38 +1,48 @@
 import { App, Messages } from '@openreplay/tracker';
 
-interface Request {
-  url: string,
-  body: string | Object,
-  headers: Record<string, string>,
+interface RequestData {
+  body: BodyInit | null | undefined
+  headers: Record<string, string>
 }
 
-interface Response {
-  url: string,
-  status: number,
-  body: string,
-  headers: Record<string, string>,
+interface ResponseData {
+  body: string | Object | null
+  headers: Record<string, string>
 }
 
+interface RequestResponseData {
+  readonly status: number
+  readonly method: string
+  url: string
+  request: RequestData
+  response: ResponseData
+}
+
+
 export interface Options {
-  sessionTokenHeader?: string;
-  replaceDefault: boolean; // overrideDefault ? 
-  failuresOnly: boolean;
-  ignoreHeaders: Array<string> | boolean;
-  requestSanitizer: ((Request) => Request | null) | null;
-  responseSanitizer: ((Response) => Response | null) | null;
+  sessionTokenHeader?: string
+  failuresOnly: boolean
+  overrideGlobal: boolean
+  ignoreHeaders: Array<string> | boolean
+  sanitiser?: (data: RequestResponseData) => RequestResponseData | null
+
+  requestSanitizer?: any
+  responseSanitizer?: any
 }
 
 export default function(opts: Partial<Options> = {}) {
   const options: Options = Object.assign(
     {
-      replaceDefault: false,
+      overrideGlobal: false,
       failuresOnly: false,
       ignoreHeaders: [ 'Cookie', 'Set-Cookie', 'Authorization' ],
-      requestSanitizer: null,
-      responseSanitizer: null,
     },
     opts,
   );
+  if (options.requestSanitizer || options.responseSanitizer) {
+    console.warn("OpenReplay fetch plugin: the `requestSanitizer` and `responseSanitizer` options are deprecated. Please use `sanitiser` instead (see the documentation at https://docs.openreplay.com/plugins/fetch).")
+  }
+
   const origFetch = window.fetch
   return (app: App | null) => {
     if (app === null) {
@@ -90,56 +100,55 @@ export default function(opts: Partial<Options> = {}) {
       r.headers.forEach((v, n) => { if (!isHIgnoring(n)) resHs[n] = v })
     }
 
-      // Request forming
-      let reqBody = ''
-      if (typeof init.body === 'string') {
-        reqBody = init.body
-      } else if (typeof init.body === 'object') {
-        try {
-          reqBody = JSON.stringify(init.body)
-        } catch {}
-      }
-      let req: Request | null = {
-        url: input,
+      const req: RequestData = {
         headers: reqHs,
-        body: reqBody,
-      }
-      if (options.requestSanitizer !== null) {
-        req = options.requestSanitizer(req)
-        if (!req) {
-          return
-        }
+        body: init.body,
       }
       // Response forming
-      let res: Response | null = {
-        url: input,
-        status: r.status,
+      const res: ResponseData = {
         headers: resHs,
         body: text,
       }
-      if (options.responseSanitizer !== null) {
-        res = options.responseSanitizer(res)
-        if (!res) {
+
+      const method = typeof init.method === 'string'
+        ? init.method.toUpperCase()
+        : 'GET'
+      let reqResInfo: RequestResponseData | null = {
+        url: input,
+        method,
+        status: r.status,
+        request: req,
+        response: res,
+      }
+      if (options.sanitiser) {
+        try {
+          reqResInfo.response.body = JSON.parse(text) as Object // Why is the return type "any"?
+        } catch {}
+        reqResInfo = options.sanitiser(reqResInfo)
+        if (!reqResInfo) {
           return
         }
       }
-      const reqStr = JSON.stringify({
-        headers: req.headers,
-        body: req.body,
-      })
-      const resStr = JSON.stringify({
-        headers: res.headers,
-        body: res.body,
-      })
+      const getStj = (r: RequestData | ResponseData): string => {
+        if (r && typeof r.body !== 'string') {
+          try {
+            r.body = JSON.stringify(r.body)
+          } catch {
+            r.body = ""
+            //app.log.warn("Openreplay fetch") // TODO: version check
+          }
+        }
+        return JSON.stringify(r)
+      }
 
       app.send(
         Messages.Fetch(
-          typeof init.method === 'string' ? 
init.method.toUpperCase() : 'GET', - input, - reqStr, - resStr, + method, + String(reqResInfo.url), + getStj(reqResInfo.request), + getStj(reqResInfo.response), r.status, startTime + performance.timing.navigationStart, duration, @@ -147,8 +156,9 @@ export default function(opts: Partial = {}) { ) }); return response; - }; - if (options.replaceDefault) { + } + + if (options.overrideGlobal) { window.fetch = fetch } return fetch; From 3b3015e025225ae0ed85ee65981daef8675f385c Mon Sep 17 00:00:00 2001 From: ShiKhu Date: Sun, 3 Apr 2022 13:31:54 +0200 Subject: [PATCH 13/15] fix(backend-http): always Close() body --- backend/services/http/handlers-ios.go | 4 ++-- backend/services/http/handlers-web.go | 2 +- backend/services/http/handlers.go | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/backend/services/http/handlers-ios.go b/backend/services/http/handlers-ios.go index f15a6af60..8116980e1 100644 --- a/backend/services/http/handlers-ios.go +++ b/backend/services/http/handlers-ios.go @@ -50,7 +50,7 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) { startTime := time.Now() req := &request{} body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT) - //defer body.Close() + defer body.Close() if err := json.NewDecoder(body).Decode(req); err != nil { responseWithError(w, http.StatusBadRequest, err) return @@ -155,7 +155,7 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) { } r.Body = http.MaxBytesReader(w, r.Body, FILES_SIZE_LIMIT) - // defer r.Body.Close() + defer r.Body.Close() err = r.ParseMultipartForm(1e6) // ~1Mb if err == http.ErrNotMultipart || err == http.ErrMissingBoundary { responseWithError(w, http.StatusUnsupportedMediaType, err) diff --git a/backend/services/http/handlers-web.go b/backend/services/http/handlers-web.go index 29dcf161d..dcbd33720 100644 --- a/backend/services/http/handlers-web.go +++ b/backend/services/http/handlers-web.go @@ -41,7 +41,7 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { startTime := time.Now() req := &request{} body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT) // what if Body == nil?? // use r.ContentLength to return specific error? 
- //defer body.Close() + defer body.Close() if err := json.NewDecoder(body).Decode(req); err != nil { responseWithError(w, http.StatusBadRequest, err) return diff --git a/backend/services/http/handlers.go b/backend/services/http/handlers.go index e45e84e64..dd73925af 100644 --- a/backend/services/http/handlers.go +++ b/backend/services/http/handlers.go @@ -9,11 +9,11 @@ import ( gzip "github.com/klauspost/pgzip" ) -const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb +const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) { body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT) - //defer body.Close() + defer body.Close() var reader io.ReadCloser var err error switch r.Header.Get("Content-Encoding") { From 72f2a96f31eda3129fd500706bc4cd145081bb49 Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Mon, 4 Apr 2022 14:09:49 +0200 Subject: [PATCH 14/15] parrot global changes --- api/chalicelib/core/telemetry.py | 4 ++-- ee/api/chalicelib/core/telemetry.py | 4 ++-- ee/backend/pkg/license/check.go | 2 +- scripts/helm/install.sh | 2 +- scripts/helm/roles/openreplay/tasks/pre-check.yaml | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/chalicelib/core/telemetry.py b/api/chalicelib/core/telemetry.py index 48f403f57..28eb97f73 100644 --- a/api/chalicelib/core/telemetry.py +++ b/api/chalicelib/core/telemetry.py @@ -30,7 +30,7 @@ def compute(): RETURNING *,(SELECT email FROM public.users WHERE role='owner' LIMIT 1);""" ) data = cur.fetchone() - requests.post('https://parrot.asayer.io/os/telemetry', json={"stats": [process_data(data)]}) + requests.post('https://api.openreplay.com/os/telemetry', json={"stats": [process_data(data)]}) def new_client(): @@ -40,4 +40,4 @@ def new_client(): (SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email FROM public.tenants;""") data = cur.fetchone() - requests.post('https://parrot.asayer.io/os/signup', json=process_data(data)) + requests.post('https://api.openreplay.com/os/signup', json=process_data(data)) diff --git a/ee/api/chalicelib/core/telemetry.py b/ee/api/chalicelib/core/telemetry.py index d9843e37d..e05df7fdc 100644 --- a/ee/api/chalicelib/core/telemetry.py +++ b/ee/api/chalicelib/core/telemetry.py @@ -53,7 +53,7 @@ def compute(): RETURNING *,(SELECT email FROM users_ee WHERE role = 'owner' AND users_ee.tenant_id = tenants.tenant_id LIMIT 1);""" ) data = cur.fetchall() - requests.post('https://parrot.asayer.io/os/telemetry', + requests.post('https://api.openreplay.com/os/telemetry', json={"stats": [process_data(d, edition='ee') for d in data]}) @@ -65,4 +65,4 @@ def new_client(tenant_id): FROM public.tenants WHERE tenant_id=%(tenant_id)s;""", {"tenant_id": tenant_id})) data = cur.fetchone() - requests.post('https://parrot.asayer.io/os/signup', json=process_data(data, edition='ee')) \ No newline at end of file + requests.post('https://api.openreplay.com/os/signup', json=process_data(data, edition='ee')) \ No newline at end of file diff --git a/ee/backend/pkg/license/check.go b/ee/backend/pkg/license/check.go index 6b33a625e..771558946 100644 --- a/ee/backend/pkg/license/check.go +++ b/ee/backend/pkg/license/check.go @@ -33,7 +33,7 @@ func CheckLicense() { log.Fatal("Can not form a license check request.") } - resp, err := http.Post("https://parrot.asayer.io/os/license", "application/json", bytes.NewReader(requestBody)) + resp, err := http.Post("https://api.openreplay.com/os/license", "application/json", bytes.NewReader(requestBody)) if err != nil { 
log.Fatalf("Error while checking license. %v", err) } diff --git a/scripts/helm/install.sh b/scripts/helm/install.sh index 529d08a8c..2bf39988c 100755 --- a/scripts/helm/install.sh +++ b/scripts/helm/install.sh @@ -26,7 +26,7 @@ which docker &> /dev/null || { } -# https://parrot.asayer.io/os/license +# https://api.openreplay.com/os/license # payload: {"mid": "UUID of the machine", "license": ""} # response {"data":{"valid": TRUE|FALSE, "expiration": expiration date in ms}} diff --git a/scripts/helm/roles/openreplay/tasks/pre-check.yaml b/scripts/helm/roles/openreplay/tasks/pre-check.yaml index 7f85d0ea1..90e6021c8 100644 --- a/scripts/helm/roles/openreplay/tasks/pre-check.yaml +++ b/scripts/helm/roles/openreplay/tasks/pre-check.yaml @@ -108,7 +108,7 @@ - all - name: Checking Enterprise Licence uri: - url: https://parrot.asayer.io/os/license + url: https://api.openreplay.com/os/license body: mid: "UUID of the machine" license: "{{ enterprise_edition_license }}" From 2b55aef0a782af5e4eba1f17af30bf8f668572ef Mon Sep 17 00:00:00 2001 From: Taha Yassine Kraiem Date: Mon, 4 Apr 2022 14:29:49 +0200 Subject: [PATCH 15/15] feat(api): optimized boarding endpoints --- api/chalicelib/core/boarding.py | 86 +++++++++++++++--------------- api/chalicelib/core/projects.py | 10 ++++ ee/api/chalicelib/core/boarding.py | 86 +++++++++++++++--------------- ee/api/chalicelib/core/projects.py | 12 ++++- 4 files changed, 105 insertions(+), 89 deletions(-) diff --git a/api/chalicelib/core/boarding.py b/api/chalicelib/core/boarding.py index c303643c8..68843b2f8 100644 --- a/api/chalicelib/core/boarding.py +++ b/api/chalicelib/core/boarding.py @@ -5,39 +5,38 @@ from chalicelib.core import users def get_state(tenant_id): - my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False) - pids = [s["projectId"] for s in my_projects] + pids = projects.get_projects_ids(tenant_id=tenant_id) with pg_client.PostgresClient() as cur: recorded = False meta = False if len(pids) > 0: cur.execute( - cur.mogrify("""\ - SELECT - COUNT(*) - FROM public.sessions AS s - where s.project_id IN %(ids)s - LIMIT 1;""", + cur.mogrify("""SELECT EXISTS(( SELECT 1 + FROM public.sessions AS s + WHERE s.project_id IN %(ids)s)) AS exists;""", {"ids": tuple(pids)}) ) - recorded = cur.fetchone()["count"] > 0 + recorded = cur.fetchone()["exists"] meta = False if recorded: - cur.execute("""SELECT SUM((SELECT COUNT(t.meta) - FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5), - (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10), - (sessions.user_id)) AS t(meta) - WHERE t.meta NOTNULL)) - FROM public.projects AS p - LEFT JOIN LATERAL ( SELECT 'defined' - FROM public.sessions - WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL - LIMIT 1) AS sessions(user_id) ON(TRUE) - WHERE p.deleted_at ISNULL;""" - ) + cur.execute("""SELECT EXISTS((SELECT 1 + FROM public.projects AS p + LEFT JOIN LATERAL ( SELECT 1 + FROM public.sessions + WHERE sessions.project_id = p.project_id + AND sessions.user_id IS NOT NULL + LIMIT 1) AS sessions(user_id) ON (TRUE) + WHERE p.deleted_at ISNULL + AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL + OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL + OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL + OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL + OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL + OR p.metadata_10 IS NOT NULL ) + )) AS exists;""") - meta = cur.fetchone()["sum"] > 0 + 
meta = cur.fetchone()["exists"] return [ {"task": "Install OpenReplay", @@ -58,22 +57,18 @@ def get_state(tenant_id): def get_state_installing(tenant_id): - my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False) - pids = [s["projectId"] for s in my_projects] + pids = projects.get_projects_ids(tenant_id=tenant_id) with pg_client.PostgresClient() as cur: recorded = False if len(pids) > 0: cur.execute( - cur.mogrify("""\ - SELECT - COUNT(*) - FROM public.sessions AS s - where s.project_id IN %(ids)s - LIMIT 1;""", + cur.mogrify("""SELECT EXISTS(( SELECT 1 + FROM public.sessions AS s + WHERE s.project_id IN %(ids)s)) AS exists;""", {"ids": tuple(pids)}) ) - recorded = cur.fetchone()["count"] > 0 + recorded = cur.fetchone()["exists"] return {"task": "Install OpenReplay", "done": recorded, @@ -82,20 +77,23 @@ def get_state_installing(tenant_id): def get_state_identify_users(tenant_id): with pg_client.PostgresClient() as cur: - cur.execute( - """SELECT SUM((SELECT COUNT(t.meta) - FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5), - (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10), - (sessions.user_id)) AS t(meta) - WHERE t.meta NOTNULL)) - FROM public.projects AS p - LEFT JOIN LATERAL ( SELECT 'defined' - FROM public.sessions - WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL - LIMIT 1) AS sessions(user_id) ON(TRUE) - WHERE p.deleted_at ISNULL;""") + cur.execute("""SELECT EXISTS((SELECT 1 + FROM public.projects AS p + LEFT JOIN LATERAL ( SELECT 1 + FROM public.sessions + WHERE sessions.project_id = p.project_id + AND sessions.user_id IS NOT NULL + LIMIT 1) AS sessions(user_id) ON (TRUE) + WHERE p.deleted_at ISNULL + AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL + OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL + OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL + OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL + OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL + OR p.metadata_10 IS NOT NULL ) + )) AS exists;""") - meta = cur.fetchone()["sum"] > 0 + meta = cur.fetchone()["exists"] return {"task": "Identify Users", "done": meta, diff --git a/api/chalicelib/core/projects.py b/api/chalicelib/core/projects.py index c5ae912aa..c57360dc4 100644 --- a/api/chalicelib/core/projects.py +++ b/api/chalicelib/core/projects.py @@ -280,3 +280,13 @@ def update_capture_status(project_id, changes): ) return changes + + +def get_projects_ids(tenant_id): + with pg_client.PostgresClient() as cur: + cur.execute(f"""SELECT s.project_id + FROM public.projects AS s + WHERE s.deleted_at IS NULL + ORDER BY s.project_id;""") + rows = cur.fetchall() + return [r["project_id"] for r in rows] diff --git a/ee/api/chalicelib/core/boarding.py b/ee/api/chalicelib/core/boarding.py index 6690e59f2..8a2076b58 100644 --- a/ee/api/chalicelib/core/boarding.py +++ b/ee/api/chalicelib/core/boarding.py @@ -6,41 +6,40 @@ from chalicelib.core import projects def get_state(tenant_id): - my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False) - pids = [s["projectId"] for s in my_projects] + pids = projects.get_projects_ids(tenant_id=tenant_id) with pg_client.PostgresClient() as cur: recorded = False meta = False if len(pids) > 0: cur.execute( - cur.mogrify("""\ - SELECT - COUNT(*) - FROM public.sessions AS s - where s.project_id IN %(ids)s - LIMIT 1;""", + cur.mogrify("""SELECT EXISTS(( SELECT 1 + FROM public.sessions AS s + WHERE s.project_id IN %(ids)s)) 
AS exists;""", {"ids": tuple(pids)}) ) - recorded = cur.fetchone()["count"] > 0 + recorded = cur.fetchone()["exists"] meta = False if recorded: cur.execute( - cur.mogrify("""SELECT SUM((SELECT COUNT(t.meta) - FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5), - (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10), - (sessions.user_id)) AS t(meta) - WHERE t.meta NOTNULL)) - FROM public.projects AS p - LEFT JOIN LATERAL ( SELECT 'defined' - FROM public.sessions - WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL - LIMIT 1) AS sessions(user_id) ON(TRUE) - WHERE p.tenant_id = %(tenant_id)s - AND p.deleted_at ISNULL;""" + cur.mogrify("""SELECT EXISTS((SELECT 1 + FROM public.projects AS p + LEFT JOIN LATERAL ( SELECT 1 + FROM public.sessions + WHERE sessions.project_id = p.project_id + AND sessions.user_id IS NOT NULL + LIMIT 1) AS sessions(user_id) ON (TRUE) + WHERE p.tenant_id = %(tenant_id)s AND p.deleted_at ISNULL + AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL + OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL + OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL + OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL + OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL + OR p.metadata_10 IS NOT NULL ) + )) AS exists;""" , {"tenant_id": tenant_id})) - meta = cur.fetchone()["sum"] > 0 + meta = cur.fetchone()["exists"] return [ {"task": "Install OpenReplay", @@ -61,22 +60,18 @@ def get_state(tenant_id): def get_state_installing(tenant_id): - my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False) - pids = [s["projectId"] for s in my_projects] + pids = projects.get_projects_ids(tenant_id=tenant_id) with pg_client.PostgresClient() as cur: recorded = False if len(pids) > 0: cur.execute( - cur.mogrify("""\ - SELECT - COUNT(*) - FROM public.sessions AS s - where s.project_id IN %(ids)s - LIMIT 1;""", + cur.mogrify("""SELECT EXISTS(( SELECT 1 + FROM public.sessions AS s + WHERE s.project_id IN %(ids)s)) AS exists;""", {"ids": tuple(pids)}) ) - recorded = cur.fetchone()["count"] > 0 + recorded = cur.fetchone()["exists"] return {"task": "Install OpenReplay", "done": recorded, @@ -86,21 +81,24 @@ def get_state_installing(tenant_id): def get_state_identify_users(tenant_id): with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify("""SELECT SUM((SELECT COUNT(t.meta) - FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5), - (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10), - (sessions.user_id)) AS t(meta) - WHERE t.meta NOTNULL)) - FROM public.projects AS p - LEFT JOIN LATERAL ( SELECT 'defined' - FROM public.sessions - WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL - LIMIT 1) AS sessions(user_id) ON(TRUE) - WHERE p.tenant_id = %(tenant_id)s - AND p.deleted_at ISNULL;""" + cur.mogrify("""SELECT EXISTS((SELECT 1 + FROM public.projects AS p + LEFT JOIN LATERAL ( SELECT 1 + FROM public.sessions + WHERE sessions.project_id = p.project_id + AND sessions.user_id IS NOT NULL + LIMIT 1) AS sessions(user_id) ON (TRUE) + WHERE p.tenant_id = %(tenant_id)s AND p.deleted_at ISNULL + AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL + OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL + OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL + OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL + OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT 
NULL + OR p.metadata_10 IS NOT NULL ) + )) AS exists;""" , {"tenant_id": tenant_id})) - meta = cur.fetchone()["sum"] > 0 + meta = cur.fetchone()["exists"] return {"task": "Identify Users", "done": meta, diff --git a/ee/api/chalicelib/core/projects.py b/ee/api/chalicelib/core/projects.py index 75a3a31d0..0255c8c8c 100644 --- a/ee/api/chalicelib/core/projects.py +++ b/ee/api/chalicelib/core/projects.py @@ -324,7 +324,7 @@ def is_authorized_batch(project_ids, tenant_id): query = cur.mogrify("""\ SELECT project_id FROM public.projects - where tenant_id =%(tenant_id)s + WHERE tenant_id =%(tenant_id)s AND project_id IN %(project_ids)s AND deleted_at IS NULL;""", {"tenant_id": tenant_id, "project_ids": tuple(project_ids)}) @@ -334,3 +334,13 @@ def is_authorized_batch(project_ids, tenant_id): ) rows = cur.fetchall() return [r["project_id"] for r in rows] + + +def get_projects_ids(tenant_id): + with pg_client.PostgresClient() as cur: + cur.execute(cur.mogrify("""SELECT s.project_id + FROM public.projects AS s + WHERE tenant_id =%(tenant_id)s AND s.deleted_at IS NULL + ORDER BY s.project_id;""", {"tenant_id": tenant_id})) + rows = cur.fetchall() + return [r["project_id"] for r in rows]
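
A note on the query shape used throughout this last patch: the boarding endpoints previously counted every matching row with `COUNT(*)` even though callers only checked whether the count was non-zero; the patch switches them to `EXISTS(SELECT 1 ...)`, which lets PostgreSQL stop at the first matching row. A minimal sketch of the pattern, using the same `public.sessions` table as above (the project IDs are placeholders):

```sql
-- Before: scans and counts all matching sessions, caller then tests count > 0
SELECT COUNT(*)
FROM public.sessions AS s
WHERE s.project_id IN (1, 2, 3);

-- After: can short-circuit on the first matching session
SELECT EXISTS((SELECT 1
               FROM public.sessions AS s
               WHERE s.project_id IN (1, 2, 3))) AS exists;
```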