Merge branch 'dev' into player-refactoring-phase-1

sylenien 2022-11-28 12:57:52 +01:00
commit 9cda85dddd
47 changed files with 481 additions and 308 deletions


@ -266,7 +266,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
"metric_type": data.metric_type, "metric_of": data.metric_of,
"metric_value": data.metric_value, "metric_format": data.metric_format}
"metric_value": data.metric_value, "metric_format": data.metric_format,
"config": json.dumps(data.config.dict())}
for i, s in enumerate(data.series):
prefix = "u_"
if s.index is None:
@ -316,7 +317,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
view_type= %(view_type)s, metric_type= %(metric_type)s,
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
metric_format= %(metric_format)s,
edited_at = timezone('utc'::text, now())
edited_at = timezone('utc'::text, now()),
default_config = %(config)s
WHERE metric_id = %(metric_id)s
AND project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)


@ -111,6 +111,8 @@ def get_dashboard(project_id, user_id, dashboard_id):
for w in row["widgets"]:
w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
w["config"]["col"] = w["default_config"]["col"]
w["config"]["row"] = w["default_config"]["row"]
for s in w["series"]:
s["created_at"] = TimeUTC.datetime_to_timestamp(s["created_at"])
return helper.dict_to_camel_case(row)


@ -76,19 +76,21 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
rows = cur.fetchall()
# if recorded is requested, check if it was saved or computed
if recorded:
for r in rows:
u_values = []
params = {}
for i, r in enumerate(rows):
if r["first_recorded_session_at"] is None:
extra_update = ""
if r["recorded"]:
extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
query = cur.mogrify(f"""UPDATE public.projects
SET sessions_last_check_at=(now() at time zone 'utc')
{extra_update}
WHERE project_id=%(project_id)s""",
{"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
cur.execute(query)
u_values.append(f"(%(project_id_{i})s,to_timestamp(%(first_recorded_{i})s/1000))")
params[f"project_id_{i}"] = r["project_id"]
params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None
r.pop("first_recorded_session_at")
r.pop("first_recorded")
if len(u_values) > 0:
query = cur.mogrify(f"""UPDATE public.projects
SET sessions_last_check_at=(now() at time zone 'utc'), first_recorded_session_at=u.first_recorded
FROM (VALUES {",".join(u_values)}) AS u(project_id,first_recorded)
WHERE projects.project_id=u.project_id;""", params)
cur.execute(query)
if recording_state and len(rows) > 0:
project_ids = [f'({r["project_id"]})' for r in rows]


@ -191,7 +191,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
GROUP BY main.session_id)
AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
""")
n_stages=len(n_stages_query)
n_stages = len(n_stages_query)
if n_stages == 0:
return []
n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
@ -215,7 +215,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
AND ISE.session_id = stages_t.session_id
AND ISS.type!='custom' -- ignore custom issues because they are massive
{"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
LIMIT 50 -- remove the limit to get exact stats
LIMIT 10 -- remove the limit to get exact stats
) AS issues_t ON (TRUE)
) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
"""
@ -348,7 +348,7 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues,
if error_id not in errors:
errors[error_id] = []
ic = 0
row_issue_id=row['issue_id']
row_issue_id = row['issue_id']
if row_issue_id is not None:
if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
if error_id == row_issue_id:
@ -533,6 +533,9 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
if is_sign:
n_critical_issues += n_issues_dict[issue_id]
# Limit the number of issues returned to the frontend
issues_dict["significant"] = issues_dict["significant"][:20]
issues_dict["insignificant"] = issues_dict["insignificant"][:20]
return n_critical_issues, issues_dict, total_drop_due_to_issues


@ -874,14 +874,14 @@ class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):
class CustomMetricsConfigSchema(BaseModel):
col: Optional[int] = Field(default=2)
col: Optional[int] = Field(...)
row: Optional[int] = Field(default=2)
position: Optional[int] = Field(default=0)
class CreateCustomMetricsSchema(TryCustomMetricsPayloadSchema):
series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)
config: CustomMetricsConfigSchema = Field(default=CustomMetricsConfigSchema())
config: CustomMetricsConfigSchema = Field(...)
@root_validator(pre=True)
def transform_series(cls, values):


@ -32,7 +32,7 @@ func main() {
log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
}
writer := sessionwriter.NewWriter(cfg.FsUlimit, cfg.FsDir, cfg.DeadSessionTimeout)
writer := sessionwriter.NewWriter(cfg.FsUlimit, cfg.FsDir, cfg.FileBuffer, cfg.SyncTimeout)
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(cfg.ProducerCloseTimeout)
@ -95,26 +95,20 @@ func main() {
counter.Update(msg.SessionID(), time.UnixMilli(ts))
}
// Write encoded message with index to session file
data := msg.EncodeWithIndex()
// Encode the message first to avoid inserting null data
data := msg.Encode()
if data == nil {
return
}
// Write message to file
if messages.IsDOMType(msg.TypeID()) {
if err := writer.WriteDOM(msg.SessionID(), data); err != nil {
log.Printf("Writer error: %v\n", err)
}
}
if !messages.IsDOMType(msg.TypeID()) || msg.TypeID() == messages.MsgTimestamp {
if err := writer.WriteDEV(msg.SessionID(), data); err != nil {
log.Printf("Writer error: %v\n", err)
}
if err := writer.Write(msg); err != nil {
log.Printf("writer error: %s", err)
return
}
// [METRICS] Record the number of messages written to files and their size
messageSize.Record(context.Background(), float64(len(data)))
messageSize.Record(context.Background(), float64(len(msg.Encode())))
savedMessages.Add(context.Background(), 1)
}
@ -132,7 +126,8 @@ func main() {
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tick := time.Tick(30 * time.Second)
tick := time.Tick(10 * time.Second)
tickInfo := time.Tick(30 * time.Second)
for {
select {
case sig := <-sigchan:
@ -146,10 +141,11 @@ func main() {
consumer.Close()
os.Exit(0)
case <-tick:
counter.Print()
if err := consumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
case <-tickInfo:
counter.Print()
log.Printf("writer: %s", writer.Info())
default:
err := consumer.ConsumeNext()
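
Note: splitting the old single 30-second tick, the loop now commits consumer offsets every 10 seconds and reports counters/writer info every 30 seconds. A self-contained sketch of the two-ticker select pattern (log statements stand in for consumer.Commit(), counter.Print(), and writer.Info()):

package main

import (
    "log"
    "time"
)

func main() {
    commitTick := time.Tick(10 * time.Second) // frequent: commit consumer offsets
    infoTick := time.Tick(30 * time.Second)   // slower: report counters and writer state
    for {
        select {
        case <-commitTick:
            log.Println("commit offsets") // consumer.Commit() in the real loop
        case <-infoTick:
            log.Println("report stats") // counter.Print(); writer.Info()
        default:
            // busy path: consume the next message (consumer.ConsumeNext() in the diff)
            time.Sleep(10 * time.Millisecond) // placeholder for real work
        }
    }
}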


@ -9,7 +9,8 @@ type Config struct {
common.Config
FsDir string `env:"FS_DIR,required"`
FsUlimit uint16 `env:"FS_ULIMIT,required"`
DeadSessionTimeout int64 `env:"DEAD_SESSION_TIMEOUT,default=600"`
FileBuffer int `env:"FILE_BUFFER,default=32768"`
SyncTimeout int `env:"SYNC_TIMEOUT,default=5"`
GroupSink string `env:"GROUP_SINK,required"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`


@ -0,0 +1,57 @@
package sessionwriter
import (
"bufio"
"os"
)
type File struct {
file *os.File
buffer *bufio.Writer
updated bool
}
func NewFile(path string, bufSize int) (*File, error) {
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
if err != nil {
return nil, err
}
return &File{
file: file,
buffer: bufio.NewWriterSize(file, bufSize),
updated: false,
}, nil
}
func (f *File) Write(data []byte) error {
leftToWrite := len(data)
for leftToWrite > 0 {
// write only the remaining tail; re-writing the full slice would duplicate data after a short write
writtenDown, err := f.buffer.Write(data[len(data)-leftToWrite:])
if err != nil {
return err
}
leftToWrite -= writtenDown
}
f.updated = true
return nil
}
func (f *File) Sync() error {
if !f.updated {
return nil
}
if err := f.buffer.Flush(); err != nil {
return err
}
if err := f.file.Sync(); err != nil {
return err
}
f.updated = false
return nil
}
func (f *File) Close() error {
_ = f.buffer.Flush()
_ = f.file.Sync()
return f.file.Close()
}
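
Note: the new File type pairs an os.File with a bufio.Writer and tracks an updated flag so Sync only flushes and fsyncs when there is dirty data. A usage sketch (illustrative, written as if inside the same package; the path and buffer size are placeholders):

package sessionwriter

func exampleFileUsage() error {
    // 32 KB buffer mirrors the FILE_BUFFER default (32768) from the config diff
    f, err := NewFile("/tmp/000000001", 32*1024)
    if err != nil {
        return err
    }
    defer f.Close() // Close flushes the buffer and fsyncs best-effort before closing
    if err := f.Write([]byte("payload")); err != nil {
        return err
    }
    return f.Sync() // no-op when nothing was written since the last Sync
}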


@ -0,0 +1,56 @@
package sessionwriter
import (
"math"
"sync"
"time"
)
type Meta struct {
limit int
lock *sync.Mutex
meta map[uint64]int64
}
func NewMeta(limit int) *Meta {
return &Meta{
limit: limit,
lock: &sync.Mutex{},
meta: make(map[uint64]int64, limit),
}
}
func (m *Meta) Add(sid uint64) {
m.lock.Lock()
m.meta[sid] = time.Now().Unix()
m.lock.Unlock()
}
func (m *Meta) Count() int {
m.lock.Lock()
defer m.lock.Unlock()
return len(m.meta)
}
func (m *Meta) Delete(sid uint64) {
m.lock.Lock()
delete(m.meta, sid)
m.lock.Unlock()
}
func (m *Meta) GetExtra() uint64 {
m.lock.Lock()
defer m.lock.Unlock()
if len(m.meta) >= m.limit {
var extraSessID uint64
var minTimestamp int64 = math.MaxInt64
for sessID, timestamp := range m.meta {
if timestamp < minTimestamp {
extraSessID = sessID
minTimestamp = timestamp
}
}
return extraSessID
}
return 0
}
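
Note: Meta records the insertion time of each open session; once the map reaches the limit, GetExtra does an O(n) scan for the entry with the smallest timestamp, which the writer then closes. A usage sketch (illustrative, same-package; timestamps have one-second resolution, so sessions added within the same second tie-break arbitrarily):

package sessionwriter

import "fmt"

func exampleMetaEviction() {
    m := NewMeta(2)
    m.Add(101)
    m.Add(102) // map is now at the limit
    if extra := m.GetExtra(); extra != 0 {
        // the caller is expected to close the session before dropping it
        m.Delete(extra)
        fmt.Println("evicted session:", extra) // 101 (or 102 on a same-second tie)
    }
}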


@ -1,81 +1,96 @@
package sessionwriter
import (
"encoding/binary"
"fmt"
"os"
"strconv"
"sync"
"time"
"openreplay/backend/pkg/messages"
)
type Session struct {
lock *sync.Mutex
dom *os.File
dev *os.File
lastUpdate time.Time
lock *sync.Mutex
dom *File
dev *File
index []byte
updated bool
}
func NewSession(dir string, id uint64) (*Session, error) {
if id == 0 {
func NewSession(sessID uint64, workDir string, bufSize int) (*Session, error) {
if sessID == 0 {
return nil, fmt.Errorf("wrong session id")
}
filePath := workDir + strconv.FormatUint(sessID, 10)
filePath := dir + strconv.FormatUint(id, 10)
domFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
dom, err := NewFile(filePath, bufSize)
if err != nil {
return nil, err
}
filePath += "devtools"
devFile, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
dev, err := NewFile(filePath+"devtools", bufSize)
if err != nil {
domFile.Close() // should close first file descriptor
dom.Close()
return nil, err
}
return &Session{
lock: &sync.Mutex{},
dom: domFile,
dev: devFile,
lastUpdate: time.Now(),
lock: &sync.Mutex{},
dom: dom,
dev: dev,
index: make([]byte, 8),
updated: false,
}, nil
}
func (s *Session) Lock() {
func (s *Session) Write(msg messages.Message) error {
s.lock.Lock()
}
defer s.lock.Unlock()
func (s *Session) Unlock() {
s.lock.Unlock()
}
// Encode message index
binary.LittleEndian.PutUint64(s.index, msg.Meta().Index)
func (s *Session) Write(mode FileType, data []byte) (err error) {
if mode == DOM {
_, err = s.dom.Write(data)
} else {
_, err = s.dev.Write(data)
// Write message to dom.mob file
if messages.IsDOMType(msg.TypeID()) {
// Write message index
if err := s.dom.Write(s.index); err != nil {
return err
}
// Write message body
if err := s.dom.Write(msg.Encode()); err != nil {
return err
}
}
s.lastUpdate = time.Now()
return err
}
func (s *Session) LastUpdate() time.Time {
return s.lastUpdate
s.updated = true
// Write message to dev.mob file
if !messages.IsDOMType(msg.TypeID()) || msg.TypeID() == messages.MsgTimestamp {
// Write message index
if err := s.dev.Write(s.index); err != nil {
return err
}
// Write message body
if err := s.dev.Write(msg.Encode()); err != nil {
return err
}
}
return nil
}
func (s *Session) Sync() error {
domErr := s.dom.Sync()
devErr := s.dev.Sync()
if domErr == nil && devErr == nil {
return nil
s.lock.Lock()
defer s.lock.Unlock()
if err := s.dom.Sync(); err != nil {
return err
}
return fmt.Errorf("dom: %s, dev: %s", domErr, devErr)
return s.dev.Sync()
}
func (s *Session) Close() error {
domErr := s.dom.Close()
devErr := s.dev.Close()
if domErr == nil && devErr == nil {
return nil
s.lock.Lock()
defer s.lock.Unlock()
if err := s.dom.Close(); err != nil {
return err
}
return fmt.Errorf("dom: %s, dev: %s", domErr, devErr)
return s.dev.Close()
}
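
Note: Session.Write now prefixes every message in the .mob files with its 8-byte little-endian index (binary.LittleEndian.PutUint64 above). A minimal sketch of reading such an index back from a session file (the path is a placeholder):

package main

import (
    "bufio"
    "encoding/binary"
    "fmt"
    "io"
    "log"
    "os"
)

// readIndex reads the 8-byte little-endian message index that precedes each
// encoded message in the session files written above.
func readIndex(r io.Reader) (uint64, error) {
    var buf [8]byte
    if _, err := io.ReadFull(r, buf[:]); err != nil {
        return 0, err
    }
    return binary.LittleEndian.Uint64(buf[:]), nil
}

func main() {
    f, err := os.Open("/tmp/000000001") // placeholder session file path
    if err != nil {
        log.Fatalln(err)
    }
    defer f.Close()
    idx, err := readIndex(bufio.NewReader(f))
    if err != nil {
        log.Fatalln(err)
    }
    fmt.Println("first message index:", idx)
}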


@ -1,8 +0,0 @@
package sessionwriter
type FileType int
const (
DOM FileType = 1
DEV FileType = 2
)


@ -3,47 +3,89 @@ package sessionwriter
import (
"fmt"
"log"
"math"
"sync"
"time"
"openreplay/backend/pkg/messages"
)
type SessionWriter struct {
ulimit int
dir string
zombieSessionTimeout float64
lock *sync.Mutex
sessions *sync.Map
meta map[uint64]int64
done chan struct{}
stopped chan struct{}
filesLimit int
workingDir string
fileBuffer int
syncTimeout time.Duration
meta *Meta
sessions *sync.Map
done chan struct{}
stopped chan struct{}
}
func NewWriter(ulimit uint16, dir string, zombieSessionTimeout int64) *SessionWriter {
func NewWriter(filesLimit uint16, workingDir string, fileBuffer int, syncTimeout int) *SessionWriter {
w := &SessionWriter{
ulimit: int(ulimit) / 2, // should divide by 2 because each session has 2 files
dir: dir + "/",
zombieSessionTimeout: float64(zombieSessionTimeout),
lock: &sync.Mutex{},
sessions: &sync.Map{},
meta: make(map[uint64]int64, ulimit),
done: make(chan struct{}),
stopped: make(chan struct{}),
filesLimit: int(filesLimit) / 2, // divided by 2 because each session keeps 2 files open
workingDir: workingDir + "/",
fileBuffer: fileBuffer,
syncTimeout: time.Duration(syncTimeout) * time.Second,
meta: NewMeta(int(filesLimit)),
sessions: &sync.Map{},
done: make(chan struct{}),
stopped: make(chan struct{}),
}
go w.synchronizer()
return w
}
func (w *SessionWriter) WriteDOM(sid uint64, data []byte) error {
return w.write(sid, DOM, data)
func (w *SessionWriter) Write(msg messages.Message) (err error) {
var (
sess *Session
sid = msg.SessionID()
)
// Load session
sessObj, ok := w.sessions.Load(sid)
if !ok {
// Create new session
sess, err = NewSession(sid, w.workingDir, w.fileBuffer)
if err != nil {
return fmt.Errorf("can't create session: %d, err: %s", sid, err)
}
// Check the open-sessions limit and close the oldest session if it has been reached
if extraSessID := w.meta.GetExtra(); extraSessID != 0 {
if err := w.Close(extraSessID); err != nil {
log.Printf("can't close session: %s", err)
}
}
// Add created session
w.sessions.Store(sid, sess)
w.meta.Add(sid)
} else {
sess = sessObj.(*Session)
}
// Write data to session
return sess.Write(msg)
}
func (w *SessionWriter) WriteDEV(sid uint64, data []byte) error {
return w.write(sid, DEV, data)
func (w *SessionWriter) sync(sid uint64) error {
sessObj, ok := w.sessions.Load(sid)
if !ok {
return fmt.Errorf("session: %d not found", sid)
}
sess := sessObj.(*Session)
return sess.Sync()
}
func (w *SessionWriter) Close(sid uint64) {
w.close(sid)
func (w *SessionWriter) Close(sid uint64) error {
sessObj, ok := w.sessions.LoadAndDelete(sid)
if !ok {
return fmt.Errorf("session: %d not found", sid)
}
sess := sessObj.(*Session)
err := sess.Close()
w.meta.Delete(sid)
return err
}
func (w *SessionWriter) Stop() {
@ -52,110 +94,11 @@ func (w *SessionWriter) Stop() {
}
func (w *SessionWriter) Info() string {
return fmt.Sprintf("%d sessions", w.numberOfSessions())
}
func (w *SessionWriter) addSession(sid uint64) {
w.lock.Lock()
w.meta[sid] = time.Now().Unix()
w.lock.Unlock()
}
func (w *SessionWriter) deleteSession(sid uint64) {
w.lock.Lock()
delete(w.meta, sid)
w.lock.Unlock()
}
func (w *SessionWriter) numberOfSessions() int {
w.lock.Lock()
defer w.lock.Unlock()
return len(w.meta)
}
func (w *SessionWriter) write(sid uint64, mode FileType, data []byte) error {
var (
sess *Session
err error
)
sessObj, ok := w.sessions.Load(sid)
if !ok {
sess, err = NewSession(w.dir, sid)
if err != nil {
return fmt.Errorf("can't write to session: %d, err: %s", sid, err)
}
sess.Lock()
defer sess.Unlock()
// Check opened files limit
if len(w.meta) >= w.ulimit {
var oldSessID uint64
var minTimestamp int64 = math.MaxInt64
for sessID, timestamp := range w.meta {
if timestamp < minTimestamp {
oldSessID = sessID
minTimestamp = timestamp
}
}
if err := w.close(oldSessID); err != nil {
log.Printf("can't close session: %s", err)
}
}
// Add new session to manager
w.sessions.Store(sid, sess)
w.addSession(sid)
} else {
sess = sessObj.(*Session)
sess.Lock()
defer sess.Unlock()
}
// Write data to session
return sess.Write(mode, data)
}
func (w *SessionWriter) sync(sid uint64) error {
sessObj, ok := w.sessions.Load(sid)
if !ok {
return fmt.Errorf("can't sync, session: %d not found", sid)
}
sess := sessObj.(*Session)
sess.Lock()
defer sess.Unlock()
err := sess.Sync()
if time.Now().Sub(sess.LastUpdate()).Seconds() > w.zombieSessionTimeout {
if err != nil {
log.Printf("can't sync session: %d, err: %s", sid, err)
}
// Close "zombie" session
err = sess.Close()
w.deleteSession(sid)
}
return err
}
func (w *SessionWriter) close(sid uint64) error {
sessObj, ok := w.sessions.LoadAndDelete(sid)
if !ok {
return fmt.Errorf("can't close, session: %d not found", sid)
}
sess := sessObj.(*Session)
sess.Lock()
defer sess.Unlock()
if err := sess.Sync(); err != nil {
log.Printf("can't sync session: %d, err: %s", sid, err)
}
err := sess.Close()
w.deleteSession(sid)
return err
return fmt.Sprintf("%d sessions", w.meta.Count())
}
func (w *SessionWriter) synchronizer() {
tick := time.Tick(2 * time.Second)
tick := time.Tick(w.syncTimeout)
for {
select {
case <-tick:
@ -167,7 +110,7 @@ func (w *SessionWriter) synchronizer() {
})
case <-w.done:
w.sessions.Range(func(sid, lockObj any) bool {
if err := w.close(sid.(uint64)); err != nil {
if err := w.Close(sid.(uint64)); err != nil {
log.Printf("can't close file descriptor: %s", err)
}
return true
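
Note: the writer's public surface shrinks to Write/Close/Stop, and the constructor now takes the file-descriptor limit, working directory, per-file buffer size, and sync period. A wiring sketch (illustrative, same-package; the values mirror the config defaults from the diff):

package sessionwriter

import (
    "log"

    "openreplay/backend/pkg/messages"
)

func exampleWriterUsage(msg messages.Message) {
    // filesLimit, workDir, fileBuffer (bytes), syncTimeout (seconds)
    w := NewWriter(1024, "/mnt/efs", 32*1024, 5)
    if err := w.Write(msg); err != nil {
        log.Printf("writer error: %s", err) // the sink logs and drops the message
    }
    w.Stop() // closes every open session via the synchronizer's done channel
}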


@ -100,6 +100,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
tp: msgType,
size: i.size,
reader: reader,
raw: batchData,
skipped: &i.canSkip,
broken: &i.broken,
meta: i.messageInfo,


@ -8,13 +8,17 @@ import (
"log"
)
var (
one = []byte{0}
three = []byte{0, 0, 0}
)
func ReadByte(reader io.Reader) (byte, error) {
p := make([]byte, 1)
_, err := io.ReadFull(reader, p)
_, err := io.ReadFull(reader, one)
if err != nil {
return 0, err
}
return p[0], nil
return one[0], nil
}
func ReadData(reader io.Reader) ([]byte, error) {
@ -156,8 +160,7 @@ func WriteSize(size uint64, buf []byte, p int) {
}
func ReadSize(reader io.Reader) (uint64, error) {
buf := make([]byte, 3)
n, err := io.ReadFull(reader, buf)
n, err := io.ReadFull(reader, three)
if err != nil {
return 0, err
}
@ -165,7 +168,7 @@ func ReadSize(reader io.Reader) (uint64, error) {
return 0, fmt.Errorf("read only %d of 3 size bytes", n)
}
var size uint64
for i, b := range buf {
for i, b := range three {
size += uint64(b) << (8 * i)
}
return size, nil
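
Note: ReadByte and ReadSize now reuse the package-level one and three buffers instead of allocating per call; that removes allocations on the hot path but makes these readers unsafe to call from multiple goroutines at once (acceptable here, assuming decoding stays single-threaded). For reference, the 3-byte size field decodes little-endian:

package main

import "fmt"

func main() {
    three := []byte{0x10, 0x02, 0x00} // wire bytes for 0x000210 = 528
    var size uint64
    for i, b := range three {
        size += uint64(b) << (8 * i) // byte i contributes bits 8*i..8*i+7
    }
    fmt.Println(size) // 528
}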


@ -13,6 +13,7 @@ type RawMessage struct {
size uint64
data []byte
reader *bytes.Reader
raw []byte
meta *message
encoded bool
skipped *bool
@ -23,15 +24,25 @@ func (m *RawMessage) Encode() []byte {
if m.encoded {
return m.data
}
m.data = make([]byte, m.size+1)
m.data[0] = uint8(m.tp)
m.encoded = true
*m.skipped = false
_, err := io.ReadFull(m.reader, m.data[1:])
if err != nil {
log.Printf("message encode err: %s, type: %d, sess: %d", err, m.tp, m.SessionID())
// Avoid an EOF: make sure the full message payload is still in the reader
if m.reader.Len() < int(m.size) {
return nil
}
// Get current batch position
currPos, err := m.reader.Seek(0, io.SeekCurrent)
if err != nil {
log.Printf("can't get current batch position: %s", err)
return nil
}
// "Move" message type
if currPos == 0 {
log.Printf("can't move message type, curr position = %d", currPos)
return nil
}
// Dirty hack to avoid extra memory allocation
m.raw[currPos-1] = uint8(m.tp)
m.data = m.raw[currPos-1 : currPos+int64(m.size)]
m.encoded = true
return m.data
}
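
Note: the rewritten Encode avoids allocating size+1 bytes per message. Because the iterator has already parsed the header, the byte just before the reader's current position in the shared batch buffer is dead, so the code stamps the message type there and returns a subslice [typeByte|payload] of the batch. A self-contained demonstration of the trick (names are illustrative):

package main

import (
    "bytes"
    "fmt"
    "io"
)

// encodeInPlace mimics RawMessage.Encode: overwrite the already-parsed header
// byte before the reader's position with the type, then slice the batch.
func encodeInPlace(raw []byte, reader *bytes.Reader, tp uint8, size int64) ([]byte, error) {
    if reader.Len() < int(size) {
        return nil, io.ErrUnexpectedEOF // not enough payload left in the batch
    }
    currPos, err := reader.Seek(0, io.SeekCurrent)
    if err != nil || currPos == 0 {
        return nil, fmt.Errorf("bad batch position %d", currPos)
    }
    raw[currPos-1] = tp // clobber the last header byte; it was already consumed
    return raw[currPos-1 : currPos+size], nil
}

func main() {
    batch := []byte{9, 'h', 'e', 'l', 'l', 'o'} // 1 header byte + 5-byte payload
    r := bytes.NewReader(batch)
    r.ReadByte() // the iterator has consumed the header, so Seek(0, Current) == 1
    out, _ := encodeInPlace(batch, r, 42, 5)
    fmt.Printf("%d %q\n", out[0], out[1:]) // 42 "hello"
}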


@ -2,6 +2,7 @@ package redisstream
import (
"log"
"regexp"
"github.com/go-redis/redis"
@ -14,9 +15,20 @@ func getRedisClient() *redis.Client {
if redisClient != nil {
return redisClient
}
redisClient = redis.NewClient(&redis.Options{
Addr: env.String("REDIS_STRING"),
})
connectionString := env.String("REDIS_STRING")
match, _ := regexp.MatchString("^[^:]+://", connectionString)
if !match {
connectionString = "redis://" + connectionString
}
options, err := redis.ParseURL(connectionString)
if err != nil {
log.Fatalln(err)
}
redisClient = redis.NewClient(options)
if _, err := redisClient.Ping().Result(); err != nil {
log.Fatalln(err)
}
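
Note: REDIS_STRING may now be a full URL; bare host:port values get a redis:// scheme prepended, redis.ParseURL handles credentials and rediss:// TLS, and a Ping fails fast on a bad config. A self-contained sketch of the same normalization (the helper name is illustrative; the API calls are the ones used in the diff):

package main

import (
    "log"
    "regexp"

    "github.com/go-redis/redis"
)

// newRedisClient accepts either "host:port" or a full redis://... URL.
func newRedisClient(connectionString string) *redis.Client {
    if ok, _ := regexp.MatchString("^[^:]+://", connectionString); !ok {
        connectionString = "redis://" + connectionString // default scheme for bare host:port
    }
    options, err := redis.ParseURL(connectionString) // parses scheme, auth, db number
    if err != nil {
        log.Fatalln(err)
    }
    client := redis.NewClient(options)
    if _, err := client.Ping().Result(); err != nil { // fail fast on unreachable Redis
        log.Fatalln(err)
    }
    return client
}

func main() {
    _ = newRedisClient("localhost:6379")
}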


@ -279,7 +279,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
"metric_type": data.metric_type, "metric_of": data.metric_of,
"metric_value": data.metric_value, "metric_format": data.metric_format}
"metric_value": data.metric_value, "metric_format": data.metric_format,
"config": json.dumps(data.config.dict())}
for i, s in enumerate(data.series):
prefix = "u_"
if s.index is None:
@ -329,7 +330,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
view_type= %(view_type)s, metric_type= %(metric_type)s,
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
metric_format= %(metric_format)s,
edited_at = timezone('utc'::text, now())
edited_at = timezone('utc'::text, now()),
default_config = %(config)s
WHERE metric_id = %(metric_id)s
AND project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)


@ -118,6 +118,8 @@ def get_dashboard(project_id, user_id, dashboard_id):
for w in row["widgets"]:
w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
w["config"]["col"] = w["default_config"]["col"]
w["config"]["row"] = w["default_config"]["row"]
for s in w["series"]:
s["created_at"] = TimeUTC.datetime_to_timestamp(s["created_at"])
return helper.dict_to_camel_case(row)


@ -87,22 +87,23 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
{"tenant_id": tenant_id, "user_id": user_id, "now": TimeUTC.now()})
cur.execute(query)
rows = cur.fetchall()
# if recorded is requested, check if it was saved or computed
if recorded:
for r in rows:
u_values = []
params = {}
for i, r in enumerate(rows):
if r["first_recorded_session_at"] is None:
extra_update = ""
if r["recorded"]:
extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
query = cur.mogrify(f"""UPDATE public.projects
SET sessions_last_check_at=(now() at time zone 'utc')
{extra_update}
WHERE project_id=%(project_id)s""",
{"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
cur.execute(query)
u_values.append(f"(%(project_id_{i})s,to_timestamp(%(first_recorded_{i})s/1000))")
params[f"project_id_{i}"] = r["project_id"]
params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None
r.pop("first_recorded_session_at")
r.pop("first_recorded")
if len(u_values) > 0:
query = cur.mogrify(f"""UPDATE public.projects
SET sessions_last_check_at=(now() at time zone 'utc'), first_recorded_session_at=u.first_recorded
FROM (VALUES {",".join(u_values)}) AS u(project_id,first_recorded)
WHERE projects.project_id=u.project_id;""", params)
cur.execute(query)
if recording_state and len(rows) > 0:
project_ids = [f'({r["project_id"]})' for r in rows]
@ -112,6 +113,7 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
WHERE sessions.start_ts >= %(startDate)s AND sessions.start_ts <= %(endDate)s
GROUP BY project_id;""",
{"startDate": TimeUTC.now(delta_days=-3), "endDate": TimeUTC.now(delta_days=1)})
cur.execute(query=query)
status = cur.fetchall()
for r in rows:


@ -198,7 +198,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
GROUP BY main.session_id)
AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
""")
n_stages=len(n_stages_query)
n_stages = len(n_stages_query)
if n_stages == 0:
return []
n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
@ -222,7 +222,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
AND ISE.session_id = stages_t.session_id
AND ISS.type!='custom' -- ignore custom issues because they are massive
{"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
LIMIT 50 -- remove the limit to get exact stats
LIMIT 10 -- remove the limit to get exact stats
) AS issues_t ON (TRUE)
) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
"""
@ -355,7 +355,7 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues,
if error_id not in errors:
errors[error_id] = []
ic = 0
row_issue_id=row['issue_id']
row_issue_id = row['issue_id']
if row_issue_id is not None:
if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
if error_id == row_issue_id:
@ -540,6 +540,9 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
if is_sign:
n_critical_issues += n_issues_dict[issue_id]
# Limit the number of issues returned to the frontend
issues_dict["significant"] = issues_dict["significant"][:20]
issues_dict["insignificant"] = issues_dict["insignificant"][:20]
return n_critical_issues, issues_dict, total_drop_due_to_issues


@ -77,4 +77,6 @@ DROP INDEX IF EXISTS events_common.requests_url_gin_idx2;
DROP INDEX IF EXISTS events.resources_url_gin_idx;
DROP INDEX IF EXISTS events.resources_url_idx;
COMMIT;
COMMIT;
CREATE INDEX CONCURRENTLY IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;


@ -1228,6 +1228,7 @@ $$
CREATE INDEX IF NOT EXISTS requests_request_body_nn_gin_idx ON events_common.requests USING GIN (request_body gin_trgm_ops) WHERE request_body IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_response_body_nn_gin_idx ON events_common.requests USING GIN (response_body gin_trgm_ops) WHERE response_body IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_status_code_nn_idx ON events_common.requests (status_code) WHERE status_code IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_host_nn_idx ON events_common.requests (host) WHERE host IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_host_nn_gin_idx ON events_common.requests USING GIN (host gin_trgm_ops) WHERE host IS NOT NULL;
CREATE INDEX IF NOT EXISTS requests_path_nn_idx ON events_common.requests (path) WHERE path IS NOT NULL;


@ -126,8 +126,9 @@ class Router extends React.Component {
}
fetchInitialData = async () => {
const siteIdFromPath = parseInt(window.location.pathname.split("/")[1])
await this.props.fetchUserInfo()
await this.props.fetchSiteList()
await this.props.fetchSiteList(siteIdFromPath)
const { mstore } = this.props;
mstore.initClient();
};


@ -5,7 +5,7 @@ import { countries } from 'App/constants';
import { useStore } from 'App/mstore';
import { browserIcon, osIcon, deviceTypeIcon } from 'App/iconNames';
import { formatTimeOrDate } from 'App/date';
import { Avatar, TextEllipsis, CountryFlag, Icon, Tooltip } from 'UI';
import { Avatar, TextEllipsis, CountryFlag, Icon, Tooltip, Popover } from 'UI';
import cn from 'classnames';
import { withRequest } from 'HOCs';
import SessionInfoItem from '../../SessionInfoItem';
@ -75,15 +75,9 @@ function UserCard({ className, request, session, width, height, similarSessions,
{userBrowser}, {userOs}, {userDevice}
</span>
<span className="mx-1 font-bold text-xl">&#183;</span>
<Tooltip
// theme="light"
delay={0}
// hideOnClick="persistent"
// arrow
// interactive
className="!bg-white shadow border !color-gray-dark"
title={
<div className="text-left">
<Popover
render={() => (
<div className="text-left bg-white">
<SessionInfoItem
comp={<CountryFlag country={userCountry} />}
label={countries[userCountry]}
@ -99,14 +93,10 @@ function UserCard({ className, request, session, width, height, similarSessions,
/>
{revId && <SessionInfoItem icon="info" label="Rev ID:" value={revId} isLast />}
</div>
}
position="bottom"
// hoverable
// disabled={false}
on="hover"
)}
>
<span className="color-teal cursor-pointer">More</span>
</Tooltip>
<span className="link">More</span>
</Popover>
</div>
</div>
</div>


@ -76,8 +76,6 @@ class IssueForm extends React.PureComponent {
const selectedIssueType = issueTypes.filter((issue) => issue.id == instance.issueType)[0];
console.log('instance', instance);
return (
<Loader loading={projectsLoading} size={40}>
<Form onSubmit={this.onSubmit} className="text-left">


@ -70,6 +70,7 @@ class Issues extends React.Component {
<div className="relative h-full w-full p-3">
<div className={stl.buttonWrapper}>
<Popover
onOpen={this.handleOpen}
render={({ close }) => (
<div>
<IssuesModal


@ -175,8 +175,7 @@ function FilterValue(props: Props) {
};
return (
//
<div className={cn("grid gap-3 w-full", { 'grid-cols-2': filter.hasSource, 'grid-cols-3' : !filter.hasSource })}>
<div className={cn("grid gap-3", { 'grid-cols-2': filter.hasSource, 'grid-cols-3' : !filter.hasSource })}>
{filter.type === FilterType.DURATION
? renderValueFiled(filter.value, 0)
: filter.value &&


@ -16,7 +16,7 @@ function LatestSessionsMessage(props: Props) {
style={{ backgroundColor: 'rgb(255 251 235)' }}
onClick={() => props.updateCurrentPage(1)}
>
Show {numberWithCommas(count)} new Sessions
Show {numberWithCommas(count)} New {count > 1 ? 'Sessions' : 'Session'}
</div>
) : (
<></>


@ -1,4 +1,4 @@
import React, { cloneElement, useMemo, useState } from 'react';
import React, { cloneElement, useEffect, useMemo, useState } from 'react';
import {
Placement,
offset,
@ -20,11 +20,18 @@ interface Props {
render: (data: { close: () => void; labelId: string; descriptionId: string }) => React.ReactNode;
placement?: Placement;
children: JSX.Element;
onOpen?: () => void;
}
const Popover = ({ children, render, placement }: Props) => {
const Popover = ({ children, render, placement, onOpen = () => {} }: Props) => {
const [open, setOpen] = useState(false);
useEffect(() => {
if (open) {
onOpen();
}
}, [open]);
const { x, y, reference, floating, strategy, context } = useFloating({
open,
onOpenChange: setOpen,


@ -65,7 +65,9 @@ const reducer = (state = initialState, action = {}) => {
case FETCH_LIST_SUCCESS:
let siteId = state.get("siteId");
const siteExists = action.data.map(s => s.projectId).includes(siteId);
if (!siteId || !siteExists) {
if (action.siteIdFromPath) {
siteId = action.siteIdFromPath;
} else if (!siteId || !siteExists) {
siteId = !!action.data.find(s => s.projectId === parseInt(storedSiteId))
? storedSiteId
: action.data[0].projectId;
@ -83,7 +85,7 @@ const reducer = (state = initialState, action = {}) => {
.set('active', list.find(s => s.id === parseInt(siteId)));
case SET_SITE_ID:
localStorage.setItem(SITE_ID_STORAGE_KEY, action.siteId)
const site = state.get('list').find(s => s.id === action.siteId);
const site = state.get('list').find(s => parseInt(s.id) == action.siteId);
return state.set('siteId', action.siteId).set('active', site);
}
return state;
@ -110,10 +112,11 @@ export function saveGDPR(siteId, gdpr) {
};
}
export function fetchList() {
export function fetchList(siteId) {
return {
types: array(FETCH_LIST),
call: client => client.get('/projects'),
siteIdFromPath: siteId
};
}


@ -131,7 +131,7 @@ export default class Widget {
series: this.series.map((series: any) => series.toJson()),
config: {
...this.config,
col: this.metricType === 'funnel' || this.metricOf === FilterKey.ERRORS || this.metricOf === FilterKey.SESSIONS ? 4 : this.config.col
col: (this.metricType === 'funnel' || this.metricOf === FilterKey.ERRORS || this.metricOf === FilterKey.SESSIONS) ? 4 : 2
},
}
}


@ -73,7 +73,7 @@ export const filterList = <T extends Record<string, any>>(
if (searchQuery === '') return list;
const filterRE = getRE(searchQuery, 'i');
let _list = list.filter((listItem: T) => {
return testKeys.some((key) => filterRE.test(listItem[key]) || searchCb?.(listItem, filterRE));
return testKeys.some((key) => filterRE.test(listItem[key])) || searchCb?.(listItem, filterRE);
});
return _list;
}


@ -69,8 +69,6 @@ spec:
value: '{{ .Values.global.s3.endpoint }}'
- name: AWS_REGION
value: '{{ .Values.global.s3.region }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
@ -95,6 +93,7 @@ spec:
# S3 compatible storage
value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}'
{{- end }}
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -105,14 +104,16 @@ spec:
containerPort: {{ $val }}
protocol: TCP
{{- end }}
{{- with .Values.persistence.mounts }}
volumeMounts:
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.persistence.volumes }}
volumes:
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 8 }}
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}


@ -47,8 +47,6 @@ spec:
value: '{{ .Values.global.clickhouse.chHost }}:{{.Values.global.clickhouse.service.webPort}}/{{.Values.env.ch_db}}'
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
@ -66,6 +64,7 @@ spec:
value: '{{ .Values.global.quickwit.enabled }}'
- name: POSTGRES_STRING
value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}'
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -76,16 +75,18 @@ spec:
containerPort: {{ $val }}
protocol: TCP
{{- end }}
{{- with .Values.persistence.mounts }}
volumeMounts:
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.persistence.volumes }}
volumes:
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 8 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}


@ -45,8 +45,6 @@ spec:
env:
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
@ -62,6 +60,7 @@ spec:
{{- end}}
- name: POSTGRES_STRING
value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}'
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -72,16 +71,18 @@ spec:
containerPort: {{ $val }}
protocol: TCP
{{- end }}
{{- with .Values.persistence.mounts }}
volumeMounts:
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.persistence.volumes }}
volumes:
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 8 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}


@ -65,8 +65,6 @@ spec:
value: '{{ .Values.global.s3.region }}'
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
@ -102,6 +100,7 @@ spec:
# S3 compatible storage
value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}'
{{- end }}
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -112,6 +111,11 @@ spec:
containerPort: {{ $val }}
protocol: TCP
{{- end }}
volumeMounts:
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
@ -126,3 +130,8 @@ spec:
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumes:
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 8 }}


@ -99,3 +99,5 @@ nodeSelector: {}
tolerations: []
affinity: {}
persistence: {}


@ -45,12 +45,11 @@ spec:
env:
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
value: '{{ .Values.global.kafka.kafkaUseSsl }}'
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -61,16 +60,18 @@ spec:
containerPort: {{ $val }}
protocol: TCP
{{- end }}
{{- with .Values.persistence.mounts }}
volumeMounts:
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.persistence.volumes }}
volumes:
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 8 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}


@ -65,8 +65,6 @@ spec:
value: '{{ .Values.global.s3.region }}'
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
@ -102,6 +100,7 @@ spec:
# S3 compatible storage
value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}'
{{- end }}
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -112,14 +111,16 @@ spec:
containerPort: {{ $val }}
protocol: TCP
{{- end }}
{{- with .Values.persistence.mounts }}
volumeMounts:
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.persistence.volumes }}
volumes:
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 8 }}
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}


@ -45,8 +45,6 @@ spec:
env:
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
@ -62,6 +60,7 @@ spec:
{{- end}}
- name: POSTGRES_STRING
value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:$(pg_password)@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}'
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -72,14 +71,16 @@ spec:
containerPort: {{ $val }}
protocol: TCP
{{- end }}
{{- with .Values.persistence.mounts }}
volumeMounts:
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 12 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.persistence.volumes }}
volumes:
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 8 }}
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}


@ -45,8 +45,6 @@ spec:
env:
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
@ -71,6 +69,7 @@ spec:
# S3 compatible storage
value: '{{ .Values.global.s3.endpoint }}/{{.Values.global.s3.assetsBucket}}'
{{- end }}
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -86,6 +85,7 @@ spec:
volumeMounts:
- name: datadir
mountPath: /mnt/efs
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 10 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 10 }}
{{- end }}
@ -96,6 +96,7 @@ spec:
# Ensure the file directory is created.
path: {{ .Values.pvc.hostMountPath }}
type: DirectoryOrCreate
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 6 }}
{{- end }}
@ -104,6 +105,7 @@ spec:
- name: datadir
persistentVolumeClaim:
claimName: {{ .Values.pvc.name }}
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
{{- with .Values.persistence.volumes }}
{{- toYaml . | nindent 8 }}
{{- end }}


@ -73,12 +73,11 @@ spec:
value: {{ .Values.global.s3.recordingsBucket }}
- name: LICENSE_KEY
value: '{{ .Values.global.enterpriseEditionLicense }}'
- name: REDIS_STRING
value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
- name: KAFKA_SERVERS
value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
- name: KAFKA_USE_SSL
value: '{{ .Values.global.kafka.kafkaUseSsl }}'
{{- include "openreplay.env.redis_string" .Values.global.redis | nindent 12 }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
@ -94,6 +93,7 @@ spec:
volumeMounts:
- name: datadir
mountPath: /mnt/efs
{{- include "openreplay.volume.redis_ca_certificate.mount" .Values.global.redis | nindent 10 }}
{{- with .Values.persistence.mounts }}
{{- toYaml . | nindent 10 }}
{{- end }}
@ -116,6 +116,7 @@ spec:
persistentVolumeClaim:
claimName: {{ .Values.pvc.name }}
{{- end }}
{{- include "openreplay.volume.redis_ca_certificate" .Values.global.redis | nindent 6 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}


@ -60,3 +60,45 @@ Create the name of the service account to use
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
{{/*
Create the environment configuration for REDIS_STRING
*/}}
{{- define "openreplay.env.redis_string" -}}
{{- $scheme := (eq (.tls | default dict).enabled true) | ternary "rediss" "redis" -}}
{{- $auth := "" -}}
{{- if or .existingSecret .redisPassword -}}
{{- $auth = printf "%s:$(REDIS_PASSWORD)@" (.redisUsername | default "") -}}
{{- end -}}
{{- if .existingSecret -}}
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
name: {{ .existingSecret }}
key: redis-password
{{- else if .redisPassword }}
- name: REDIS_PASSWORD
value: {{ .redisPassword }}
{{- end}}
- name: REDIS_STRING
value: '{{ $scheme }}://{{ $auth }}{{ .redisHost }}:{{ .redisPort }}'
{{- end }}
{{/*
Create the volume mount config for redis TLS certificates
*/}}
{{- define "openreplay.volume.redis_ca_certificate" -}}
{{- if and ((.tls | default dict).enabled) (.tls.certificatesSecret) (.tls.certCAFilename) -}}
- name: redis-ca-certificate
secret:
secretName: {{ .tls.certificatesSecret }}
{{- end }}
{{- end }}
{{- define "openreplay.volume.redis_ca_certificate.mount" -}}
{{- if and ((.tls |default dict).enabled) (.tls.certificatesSecret) (.tls.certCAFilename) -}}
- name: redis-ca-certificate
mountPath: /etc/ssl/certs/redis-ca-certificate.pem
subPath: {{ .tls.certCAFilename }}
{{- end }}
{{- end }}


@ -67,4 +67,6 @@ DROP INDEX IF EXISTS events_common.requests_url_gin_idx2;
DROP INDEX IF EXISTS events.resources_url_gin_idx;
DROP INDEX IF EXISTS events.resources_url_idx;
COMMIT;
COMMIT;
CREATE INDEX CONCURRENTLY IF NOT EXISTS requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;


@ -603,6 +603,7 @@ $$
CREATE INDEX requests_request_body_nn_gin_idx ON events_common.requests USING GIN (request_body gin_trgm_ops) WHERE request_body IS NOT NULL;
CREATE INDEX requests_response_body_nn_gin_idx ON events_common.requests USING GIN (response_body gin_trgm_ops) WHERE response_body IS NOT NULL;
CREATE INDEX requests_status_code_nn_idx ON events_common.requests (status_code) WHERE status_code IS NOT NULL;
CREATE INDEX requests_session_id_status_code_nn_idx ON events_common.requests (session_id, status_code) WHERE status_code IS NOT NULL;
CREATE INDEX requests_host_nn_idx ON events_common.requests (host) WHERE host IS NOT NULL;
CREATE INDEX requests_host_nn_gin_idx ON events_common.requests USING GIN (host gin_trgm_ops) WHERE host IS NOT NULL;
CREATE INDEX requests_path_nn_idx ON events_common.requests (path) WHERE path IS NOT NULL;


@ -49,7 +49,7 @@ type OptionalCallback = (()=>Record<string, unknown>) | void
type Agent = {
onDisconnect?: OptionalCallback,
onControlReleased?: OptionalCallback,
agentInfo: Record<string, string>
agentInfo: Record<string, string> | undefined
//
}
@ -229,9 +229,10 @@ export default class Assist {
})
socket.on('AGENTS_CONNECTED', (ids: string[]) => {
ids.forEach(id =>{
const agentInfo = this.agents[id]?.agentInfo
this.agents[id] = {
...this.agents[id],
onDisconnect: this.options.onAgentConnect?.( this.agents[id].agentInfo),
agentInfo,
onDisconnect: this.options.onAgentConnect?.(agentInfo),
}
})
this.assistDemandedRestart = true


@ -1,7 +1,7 @@
{
"name": "@openreplay/tracker",
"description": "The OpenReplay tracker main package",
"version": "4.1.8",
"version": "4.1.9",
"keywords": [
"logging",
"replay"