Merge branch 'dev' into live-se-red

This commit is contained in:
nick-delirium 2024-11-28 12:02:22 +01:00
commit 92412685b0
No known key found for this signature in database
GPG key ID: 93ABD695DF5FDBA0
104 changed files with 3285 additions and 2737 deletions

View file

@ -1,3 +1,4 @@
#!/bin/sh
# Pin all timestamps to UTC so times are consistent across services.
export TZ=UTC
# Run the ASGI app behind a reverse proxy: honor X-Forwarded-* headers,
# bind on all interfaces at $LISTEN_PORT. S_LOGLEVEL defaults to "warning".
uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --proxy-headers --log-level ${S_LOGLEVEL:-warning}

View file

@ -1,3 +1,4 @@
#!/bin/sh
export TZ=UTC
export ASSIST_KEY=ignore
uvicorn app:app --host 0.0.0.0 --port 8888 --log-level ${S_LOGLEVEL:-warning}

View file

@ -1,3 +1,4 @@
#!/bin/zsh
# Development entrypoint: pin timestamps to UTC, then run the alerts ASGI app
# with auto-reload on port 8888. S_LOGLEVEL defaults to "warning" when unset.
export TZ=UTC
uvicorn app_alerts:app --reload --port 8888 --log-level ${S_LOGLEVEL:-warning}

View file

@ -1,3 +1,4 @@
#!/bin/zsh
# Development entrypoint: pin timestamps to UTC, then run the main ASGI app
# with auto-reload on uvicorn's default port. S_LOGLEVEL defaults to "warning".
export TZ=UTC
uvicorn app:app --reload --log-level ${S_LOGLEVEL:-warning}

View file

@ -1357,6 +1357,7 @@ class LiveFilterType(str, Enum):
# Each live-session filter mirrors the corresponding FilterType member's value,
# so live and recorded filters stay interchangeable at the wire level.
USER_BROWSER = FilterType.USER_BROWSER.value
USER_DEVICE = FilterType.USER_DEVICE.value
USER_COUNTRY = FilterType.USER_COUNTRY.value
USER_CITY = FilterType.USER_CITY.value
USER_STATE = FilterType.USER_STATE.value
USER_ID = FilterType.USER_ID.value
USER_ANONYMOUS_ID = FilterType.USER_ANONYMOUS_ID.value

View file

@ -2,30 +2,27 @@ package main
import (
"context"
"os"
"os/signal"
"syscall"
"openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/router"
"openreplay/backend/internal/http/server"
"openreplay/backend/internal/http/services"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics"
databaseMetrics "openreplay/backend/pkg/metrics/database"
httpMetrics "openreplay/backend/pkg/metrics/http"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/server"
"openreplay/backend/pkg/server/api"
)
func main() {
ctx := context.Background()
log := logger.New()
cfg := http.New(log)
metrics.New(log, append(httpMetrics.List(), databaseMetrics.List()...))
webMetrics := web.New("http")
metrics.New(log, append(webMetrics.List(), databaseMetrics.List()...))
// Connect to queue
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(15000)
@ -37,38 +34,21 @@ func main() {
redisClient, err := redis.New(&cfg.Redis)
if err != nil {
log.Warn(ctx, "can't init redis connection: %s", err)
log.Info(ctx, "no redis cache: %s", err)
}
defer redisClient.Close()
services, err := services.New(log, cfg, producer, pgConn, redisClient)
builder, err := services.New(log, cfg, webMetrics, producer, pgConn, redisClient)
if err != nil {
log.Fatal(ctx, "failed while creating services: %s", err)
}
router, err := router.NewRouter(cfg, log, services)
router, err := api.NewRouter(&cfg.HTTP, log)
if err != nil {
log.Fatal(ctx, "failed while creating router: %s", err)
}
router.AddHandlers(api.NoPrefix, builder.WebAPI, builder.MobileAPI, builder.ConditionsAPI, builder.FeatureFlagsAPI,
builder.TagsAPI, builder.UxTestsAPI)
server, err := server.New(router.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
if err != nil {
log.Fatal(ctx, "failed while creating server: %s", err)
}
// Run server
go func() {
if err := server.Start(); err != nil {
log.Fatal(ctx, "http server error: %s", err)
}
}()
log.Info(ctx, "server successfully started on port %s", cfg.HTTPPort)
// Wait stop signal to shut down server gracefully
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
<-sigchan
log.Info(ctx, "shutting down the server")
server.Stop()
server.Run(ctx, log, &cfg.HTTP, router)
}

View file

@ -2,24 +2,24 @@ package main
import (
"context"
"os"
"os/signal"
"syscall"
config "openreplay/backend/internal/config/integrations"
"openreplay/backend/internal/http/server"
"openreplay/backend/pkg/db/postgres/pool"
integration "openreplay/backend/pkg/integrations"
"openreplay/backend/pkg/integrations"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics"
"openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server"
"openreplay/backend/pkg/server/api"
)
func main() {
ctx := context.Background()
log := logger.New()
cfg := config.New(log)
metrics.New(log, append(database.List()))
webMetrics := web.New("integrations")
metrics.New(log, append(webMetrics.List(), database.List()...))
pgConn, err := pool.New(cfg.Postgres.String())
if err != nil {
@ -27,31 +27,17 @@ func main() {
}
defer pgConn.Close()
services, err := integration.NewServiceBuilder(log, cfg, pgConn)
builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, pgConn)
if err != nil {
log.Fatal(ctx, "can't init services: %s", err)
}
router, err := integration.NewRouter(cfg, log, services)
router, err := api.NewRouter(&cfg.HTTP, log)
if err != nil {
log.Fatal(ctx, "failed while creating router: %s", err)
}
router.AddHandlers(api.NoPrefix, builder.IntegrationsAPI)
router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
dataIntegrationServer, err := server.New(router.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
if err != nil {
log.Fatal(ctx, "failed while creating server: %s", err)
}
go func() {
if err := dataIntegrationServer.Start(); err != nil {
log.Fatal(ctx, "http server error: %s", err)
}
}()
log.Info(ctx, "server successfully started on port %s", cfg.HTTPPort)
// Wait stop signal to shut down server gracefully
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
<-sigchan
log.Info(ctx, "shutting down the server")
dataIntegrationServer.Stop()
server.Run(ctx, log, &cfg.HTTP, router)
}

View file

@ -2,26 +2,25 @@ package main
import (
"context"
"openreplay/backend/pkg/spot"
"openreplay/backend/pkg/spot/api"
"os"
"os/signal"
"syscall"
spotConfig "openreplay/backend/internal/config/spot"
"openreplay/backend/internal/http/server"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics"
databaseMetrics "openreplay/backend/pkg/metrics/database"
spotMetrics "openreplay/backend/pkg/metrics/spot"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/spot"
)
func main() {
ctx := context.Background()
log := logger.New()
cfg := spotConfig.New(log)
metrics.New(log, append(spotMetrics.List(), databaseMetrics.List()...))
webMetrics := web.New("spot")
metrics.New(log, append(webMetrics.List(), append(spotMetrics.List(), databaseMetrics.List()...)...))
pgConn, err := pool.New(cfg.Postgres.String())
if err != nil {
@ -29,32 +28,17 @@ func main() {
}
defer pgConn.Close()
services, err := spot.NewServiceBuilder(log, cfg, pgConn)
builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, pgConn)
if err != nil {
log.Fatal(ctx, "can't init services: %s", err)
}
router, err := api.NewRouter(cfg, log, services)
router, err := api.NewRouter(&cfg.HTTP, log)
if err != nil {
log.Fatal(ctx, "failed while creating router: %s", err)
}
router.AddHandlers(api.NoPrefix, builder.SpotsAPI)
router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
spotServer, err := server.New(router.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
if err != nil {
log.Fatal(ctx, "failed while creating server: %s", err)
}
go func() {
if err := spotServer.Start(); err != nil {
log.Fatal(ctx, "http server error: %s", err)
}
}()
log.Info(ctx, "server successfully started on port %s", cfg.HTTPPort)
// Wait stop signal to shut down server gracefully
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
<-sigchan
log.Info(ctx, "shutting down the server")
spotServer.Stop()
server.Run(ctx, log, &cfg.HTTP, router)
}

View file

@ -1,6 +1,9 @@
package common
import "strings"
import (
"strings"
"time"
)
// Common config for all services
@ -70,3 +73,13 @@ type ElasticSearch struct {
// GetURLs splits the comma-separated ElasticSearch URL list from the
// configuration into individual URLs.
func (cfg *ElasticSearch) GetURLs() []string {
	return strings.Split(cfg.URLs, ",")
}
// HTTP groups the HTTP-server settings shared by services via embedding:
// bind address/port, request timeout, JSON payload limit, CORS toggle,
// and the JWT secrets used for request authentication.
type HTTP struct {
	HTTPHost                string        `env:"HTTP_HOST,default="` // empty host binds all interfaces
	HTTPPort                string        `env:"HTTP_PORT,required"`
	HTTPTimeout             time.Duration `env:"HTTP_TIMEOUT,default=60s"`
	JsonSizeLimit           int64         `env:"JSON_SIZE_LIMIT,default=131072"` // 128KB, 1000 for HTTP service
	UseAccessControlHeaders bool          `env:"USE_CORS,default=false"`
	JWTSecret               string        `env:"JWT_SECRET,required"`
	JWTSpotSecret           string        `env:"JWT_SPOT_SECRET,required"` // separate secret for Spot tokens
}

View file

@ -15,31 +15,27 @@ type Config struct {
common.Postgres
redis.Redis
objectstorage.ObjectsConfig
HTTPHost string `env:"HTTP_HOST,default="`
HTTPPort string `env:"HTTP_PORT,required"`
HTTPTimeout time.Duration `env:"HTTP_TIMEOUT,default=60s"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawMobile string `env:"TOPIC_RAW_IOS,required"`
TopicRawImages string `env:"TOPIC_RAW_IMAGES,required"`
TopicCanvasImages string `env:"TOPIC_CANVAS_IMAGES,required"`
BeaconSizeLimit int64 `env:"BEACON_SIZE_LIMIT,required"`
CompressionThreshold int64 `env:"COMPRESSION_THRESHOLD,default=20000"`
JsonSizeLimit int64 `env:"JSON_SIZE_LIMIT,default=1000"`
FileSizeLimit int64 `env:"FILE_SIZE_LIMIT,default=10000000"`
TokenSecret string `env:"TOKEN_SECRET,required"`
UAParserFile string `env:"UAPARSER_FILE,required"`
MaxMinDBFile string `env:"MAXMINDDB_FILE,required"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
UseAccessControlHeaders bool `env:"USE_CORS,default=false"`
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
RecordCanvas bool `env:"RECORD_CANVAS,default=false"`
CanvasQuality string `env:"CANVAS_QUALITY,default=low"`
CanvasFps int `env:"CANVAS_FPS,default=1"`
MobileQuality string `env:"MOBILE_QUALITY,default=low"` // (low, standard, high)
MobileFps int `env:"MOBILE_FPS,default=1"`
IsFeatureFlagEnabled bool `env:"IS_FEATURE_FLAG_ENABLED,default=false"`
IsUsabilityTestEnabled bool `env:"IS_USABILITY_TEST_ENABLED,default=false"`
WorkerID uint16
common.HTTP
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawMobile string `env:"TOPIC_RAW_IOS,required"`
TopicRawImages string `env:"TOPIC_RAW_IMAGES,required"`
TopicCanvasImages string `env:"TOPIC_CANVAS_IMAGES,required"`
BeaconSizeLimit int64 `env:"BEACON_SIZE_LIMIT,required"`
CompressionThreshold int64 `env:"COMPRESSION_THRESHOLD,default=20000"`
FileSizeLimit int64 `env:"FILE_SIZE_LIMIT,default=10000000"`
TokenSecret string `env:"TOKEN_SECRET,required"`
UAParserFile string `env:"UAPARSER_FILE,required"`
MaxMinDBFile string `env:"MAXMINDDB_FILE,required"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
RecordCanvas bool `env:"RECORD_CANVAS,default=false"`
CanvasQuality string `env:"CANVAS_QUALITY,default=low"`
CanvasFps int `env:"CANVAS_FPS,default=1"`
MobileQuality string `env:"MOBILE_QUALITY,default=low"` // (low, standard, high)
MobileFps int `env:"MOBILE_FPS,default=1"`
IsFeatureFlagEnabled bool `env:"IS_FEATURE_FLAG_ENABLED,default=false"`
IsUsabilityTestEnabled bool `env:"IS_USABILITY_TEST_ENABLED,default=false"`
WorkerID uint16
}
func New(log logger.Logger) *Config {

View file

@ -16,14 +16,9 @@ type Config struct {
common.Postgres
redis.Redis
objectstorage.ObjectsConfig
HTTPHost string `env:"HTTP_HOST,default="`
HTTPPort string `env:"HTTP_PORT,required"`
HTTPTimeout time.Duration `env:"HTTP_TIMEOUT,default=60s"`
JsonSizeLimit int64 `env:"JSON_SIZE_LIMIT,default=131072"` // 128KB
UseAccessControlHeaders bool `env:"USE_CORS,default=false"`
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
JWTSecret string `env:"JWT_SECRET,required"`
WorkerID uint16
common.HTTP
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
WorkerID uint16
}
func New(log logger.Logger) *Config {

View file

@ -16,18 +16,12 @@ type Config struct {
common.Postgres
redis.Redis
objectstorage.ObjectsConfig
FSDir string `env:"FS_DIR,required"`
SpotsDir string `env:"SPOTS_DIR,default=spots"`
HTTPHost string `env:"HTTP_HOST,default="`
HTTPPort string `env:"HTTP_PORT,required"`
HTTPTimeout time.Duration `env:"HTTP_TIMEOUT,default=60s"`
JsonSizeLimit int64 `env:"JSON_SIZE_LIMIT,default=131072"` // 128KB
UseAccessControlHeaders bool `env:"USE_CORS,default=false"`
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
JWTSecret string `env:"JWT_SECRET,required"`
JWTSpotSecret string `env:"JWT_SPOT_SECRET,required"`
MinimumStreamDuration int `env:"MINIMUM_STREAM_DURATION,default=15000"` // 15s
WorkerID uint16
common.HTTP
FSDir string `env:"FS_DIR,required"`
SpotsDir string `env:"SPOTS_DIR,default=spots"`
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
MinimumStreamDuration int `env:"MINIMUM_STREAM_DURATION,default=15000"` // 15s
WorkerID uint16
}
func New(log logger.Logger) *Config {

View file

@ -2,7 +2,10 @@ package geoip
import (
"errors"
"github.com/tomasen/realip"
"net"
"net/http"
"openreplay/backend/pkg/logger"
"strings"
"github.com/oschwald/maxminddb-golang"
@ -46,18 +49,23 @@ func UnpackGeoRecord(pkg string) *GeoRecord {
type GeoParser interface {
Parse(ip net.IP) (*GeoRecord, error)
ExtractGeoData(r *http.Request) *GeoRecord
}
type geoParser struct {
r *maxminddb.Reader
log logger.Logger
r *maxminddb.Reader
}
func New(file string) (GeoParser, error) {
func New(log logger.Logger, file string) (GeoParser, error) {
r, err := maxminddb.Open(file)
if err != nil {
return nil, err
}
return &geoParser{r}, nil
return &geoParser{
log: log,
r: r,
}, nil
}
func (geoIP *geoParser) Parse(ip net.IP) (*GeoRecord, error) {
@ -82,3 +90,12 @@ func (geoIP *geoParser) Parse(ip net.IP) (*GeoRecord, error) {
res.City = record.City.Names["en"]
return res, nil
}
// ExtractGeoData resolves the client IP from the request (realip honors
// forwarding headers) and looks it up in the MaxMind database.
// On lookup failure it logs a warning and still returns whatever Parse
// produced — NOTE(review): presumably a usable zero-value record on error;
// confirm Parse's contract before relying on non-nil here.
func (geoIP *geoParser) ExtractGeoData(r *http.Request) *GeoRecord {
	ip := net.ParseIP(realip.FromRequest(r))
	geoRec, err := geoIP.Parse(ip)
	if err != nil {
		geoIP.log.Warn(r.Context(), "failed to parse geo data: %v", err)
	}
	return geoRec
}

View file

@ -1,11 +0,0 @@
package router
import (
"errors"
"net/http"
"time"
)
// getConditions is a stub endpoint: conditions are not supported in this
// build, so every request is answered with 501 Not Implemented.
func (e *Router) getConditions(w http.ResponseWriter, r *http.Request) {
	requestedAt := time.Now()
	notSupported := errors.New("no support")
	e.ResponseWithError(r.Context(), w, http.StatusNotImplemented, notSupported, requestedAt, r.URL.Path, 0)
}

View file

@ -1,271 +0,0 @@
package router
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"openreplay/backend/internal/http/ios"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/token"
"strconv"
"time"
)
// startMobileSessionHandler starts (or resumes) a mobile recording session.
// It validates the project key, platform support, tracker version and sample
// rate; for a new session it mints a flake session ID and auth token,
// persists session metadata, and emits a MobileSessionStart message to the
// raw-mobile topic. The response carries the token plus recording parameters.
func (e *Router) startMobileSessionHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	if r.Body == nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
		return
	}
	// Cap the request body at the configured JSON size limit.
	body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
	defer body.Close()

	req := &StartMobileSessionRequest{}
	if err := json.NewDecoder(body).Decode(req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, 0)
		return
	}

	// Add tracker version to context
	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))

	if req.ProjectKey == nil {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, 0)
		return
	}

	p, err := e.services.Projects.GetProjectByKey(*req.ProjectKey)
	if err != nil {
		// Distinguish "unknown project" (404) from backend failure (500).
		if postgres.IsNoRowsErr(err) {
			logErr := fmt.Errorf("project doesn't exist or is not active, key: %s", *req.ProjectKey)
			e.ResponseWithError(r.Context(), w, http.StatusNotFound, logErr, startTime, r.URL.Path, 0)
		} else {
			e.log.Error(r.Context(), "failed to get project by key: %s, err: %s", *req.ProjectKey, err)
			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, 0)
		}
		return
	}

	// Add projectID to context
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))

	// Check if the project supports mobile sessions
	if !p.IsMobile() {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("project doesn't support mobile sessions"), startTime, r.URL.Path, 0)
		return
	}
	if !checkMobileTrackerVersion(req.TrackerVersion) {
		e.ResponseWithError(r.Context(), w, http.StatusUpgradeRequired, errors.New("tracker version not supported"), startTime, r.URL.Path, 0)
		return
	}

	userUUID := uuid.GetUUID(req.UserUUID)
	tokenData, err := e.services.Tokenizer.Parse(req.Token)
	if err != nil { // Starting the new one
		// Sampling: drop the session unless the dice roll falls under the
		// project's (or the matched condition's) sample rate.
		dice := byte(rand.Intn(100)) // [0, 100)
		// Use condition rate if it's set
		if req.Condition != "" {
			rate, err := e.services.Conditions.GetRate(p.ProjectID, req.Condition, int(p.SampleRate))
			if err != nil {
				e.log.Warn(r.Context(), "can't get condition rate, condition: %s, err: %s", req.Condition, err)
			} else {
				p.SampleRate = byte(rate)
			}
		}
		if dice >= p.SampleRate {
			e.ResponseWithError(r.Context(), w, http.StatusForbidden, fmt.Errorf("capture rate miss, rate: %d", p.SampleRate), startTime, r.URL.Path, 0)
			return
		}
		ua := e.services.UaParser.ParseFromHTTPRequest(r)
		if ua == nil {
			e.ResponseWithError(r.Context(), w, http.StatusForbidden, fmt.Errorf("browser not recognized, user-agent: %s", r.Header.Get("User-Agent")), startTime, r.URL.Path, 0)
			return
		}
		sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
		if err != nil {
			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
			return
		}
		// Token expires after the project's maximum session duration.
		expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
		tokenData = &token.TokenData{sessionID, 0, expTime.UnixMilli()}

		// Add sessionID to context
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID)))

		geoInfo := e.ExtractGeoData(r)

		// Default to iOS; any other explicit platform is treated as Android.
		deviceType, platform, os := ios.GetIOSDeviceType(req.UserDevice), "ios", "IOS"
		if req.Platform != "" && req.Platform != "ios" {
			deviceType = req.UserDeviceType
			platform = req.Platform
			os = "Android"
		}

		if !req.DoNotRecord {
			// Persist session metadata; failure is non-fatal (logged only).
			if err := e.services.Sessions.Add(&sessions.Session{
				SessionID:            sessionID,
				Platform:             platform,
				Timestamp:            req.Timestamp,
				Timezone:             req.Timezone,
				ProjectID:            p.ProjectID,
				TrackerVersion:       req.TrackerVersion,
				RevID:                req.RevID,
				UserUUID:             userUUID,
				UserOS:               os,
				UserOSVersion:        req.UserOSVersion,
				UserDevice:           ios.MapIOSDevice(req.UserDevice),
				UserDeviceType:       deviceType,
				UserCountry:          geoInfo.Country,
				UserState:            geoInfo.State,
				UserCity:             geoInfo.City,
				UserDeviceMemorySize: req.DeviceMemory,
				UserDeviceHeapSize:   req.DeviceMemory,
				ScreenWidth:          req.Width,
				ScreenHeight:         req.Height,
			}); err != nil {
				e.log.Warn(r.Context(), "failed to add mobile session to DB: %s", err)
			}

			// Announce the session start on the raw-mobile queue.
			sessStart := &messages.MobileSessionStart{
				Timestamp:      req.Timestamp,
				ProjectID:      uint64(p.ProjectID),
				TrackerVersion: req.TrackerVersion,
				RevID:          req.RevID,
				UserUUID:       userUUID,
				UserOS:         os,
				UserOSVersion:  req.UserOSVersion,
				UserDevice:     ios.MapIOSDevice(req.UserDevice),
				UserDeviceType: deviceType,
				UserCountry:    geoInfo.Pack(),
			}
			if err := e.services.Producer.Produce(e.cfg.TopicRawMobile, tokenData.ID, sessStart.Encode()); err != nil {
				e.log.Error(r.Context(), "failed to send mobile sessionStart event to queue: %s", err)
			}
		}
	}

	// Respond with the (new or re-composed) token and recording parameters.
	e.ResponseWithJSON(r.Context(), w, &StartMobileSessionResponse{
		Token:           e.services.Tokenizer.Compose(*tokenData),
		UserUUID:        userUUID,
		SessionID:       strconv.FormatUint(tokenData.ID, 10),
		BeaconSizeLimit: e.cfg.BeaconSizeLimit,
		ImageQuality:    e.cfg.MobileQuality,
		FrameRate:       e.cfg.MobileFps,
		ProjectID:       strconv.FormatUint(uint64(p.ProjectID), 10),
		Features:        e.features,
	}, startTime, r.URL.Path, 0)
}
// pushMobileMessagesHandler authenticates the session token and forwards the
// request body to the raw-mobile topic keyed by session ID.
func (e *Router) pushMobileMessagesHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	// Attach the sessionID to the context even when the token is invalid,
	// so the error response is attributable.
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawMobile)
}
// pushMobileLateMessagesHandler accepts late-arriving mobile messages:
// unlike the regular push handler, an EXPIRED token is tolerated.
func (e *Router) pushMobileLateMessagesHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil && err != token.EXPIRED {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
		return
	}
	// NOTE(review): if err == token.EXPIRED, this assumes sessionData is still
	// non-nil — confirm Tokenizer.ParseFromHTTPRequest guarantees that, else
	// sessionData.ID below can panic.
	// Check timestamps here?
	e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawMobile)
}
// mobileImagesUploadHandler receives multipart screenshot uploads for an
// authenticated mobile session and forwards each file's bytes to the
// raw-images topic keyed by session ID.
func (e *Router) mobileImagesUploadHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}

	if r.Body == nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
		return
	}
	// Cap the upload at the configured file size limit.
	r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
	defer r.Body.Close()

	// Parse with a ~5MB in-memory threshold; larger parts spill to disk.
	err = r.ParseMultipartForm(5 * 1e6) // ~5Mb
	if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
		e.ResponseWithError(r.Context(), w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
		return
	} else if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
		return
	}
	if r.MultipartForm == nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("multipart not parsed"), startTime, r.URL.Path, 0)
		return
	}

	if len(r.MultipartForm.Value["projectKey"]) == 0 {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter?
		return
	}

	for _, fileHeaderList := range r.MultipartForm.File {
		for _, fileHeader := range fileHeaderList {
			file, err := fileHeader.Open()
			if err != nil {
				// Unreadable part: skip it rather than failing the batch.
				continue
			}

			data, err := io.ReadAll(file)
			if err != nil {
				file.Close()
				e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
				return
			}
			file.Close()

			// Queue failures are logged but don't abort remaining files.
			if err := e.services.Producer.Produce(e.cfg.TopicRawImages, sessionData.ID, data); err != nil {
				e.log.Warn(r.Context(), "failed to send image to queue: %s", err)
			}
		}
	}
	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, 0)
}

View file

@ -1,754 +0,0 @@
package router
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
"github.com/gorilla/mux"
"github.com/Masterminds/semver"
"github.com/klauspost/compress/gzip"
"openreplay/backend/internal/http/util"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/featureflags"
"openreplay/backend/pkg/flakeid"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/token"
"openreplay/backend/pkg/uxtesting"
)
// readBody reads at most limit bytes of the request body and returns them,
// transparently decompressing payloads sent with Content-Encoding: gzip.
// The size-limited body is closed on every return path (the original closed
// it only on the non-error fall-through); close failures are logged, not
// propagated. Decompression errors are wrapped with %w so callers can unwrap.
func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
	body := http.MaxBytesReader(w, r.Body, limit)
	defer func() {
		if closeErr := body.Close(); closeErr != nil {
			e.log.Warn(r.Context(), "error while closing request body: %s", closeErr)
		}
	}()

	// Check if body is gzipped and decompress it
	if r.Header.Get("Content-Encoding") == "gzip" {
		reader, err := gzip.NewReader(body)
		if err != nil {
			return nil, fmt.Errorf("can't create gzip reader: %w", err)
		}
		bodyBytes, err := io.ReadAll(reader)
		if err != nil {
			return nil, fmt.Errorf("can't read gzip body: %w", err)
		}
		if err := reader.Close(); err != nil {
			e.log.Warn(r.Context(), "can't close gzip reader: %s", err)
		}
		return bodyBytes, nil
	}

	bodyBytes, err := io.ReadAll(body)
	if err != nil {
		return nil, err
	}
	return bodyBytes, nil
}
// checkMobileTrackerVersion reports whether the given mobile tracker version
// satisfies the minimum supported version (>= 1.0.9). A pre-release suffix
// such as "1.2.3-beta" is stripped before parsing; unparsable input is
// treated as unsupported.
func checkMobileTrackerVersion(ver string) bool {
	minSupported, err := semver.NewConstraint(">=1.0.9")
	if err != nil {
		return false
	}
	// Drop anything after the first "-" (beta/pre-release marker).
	if idx := strings.Index(ver, "-"); idx != -1 {
		ver = ver[:idx]
	}
	parsed, err := semver.NewVersion(ver)
	if err != nil {
		return false
	}
	return minSupported.Check(parsed)
}
// getSessionTimestamp picks the session-start timestamp: the client-supplied
// req.Timestamp by default; for online sessions from trackers >= 4.1.6 it
// uses the server receive time instead, backdated by the client-reported
// buffering delay when that delay looks sane.
func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint64) {
	ts = uint64(req.Timestamp)
	// Offline (buffered) sessions keep the client-reported timestamp as-is.
	if req.IsOffline {
		return
	}
	c, err := semver.NewConstraint(">=4.1.6")
	if err != nil {
		return
	}
	// Strip a pre-release suffix ("4.1.6-beta" -> "4.1.6") before parsing.
	ver := req.TrackerVersion
	parts := strings.Split(ver, "-")
	if len(parts) > 1 {
		ver = parts[0]
	}
	v, err := semver.NewVersion(ver)
	if err != nil {
		return
	}
	if c.Check(v) {
		ts = uint64(startTimeMili)
		// Only trust BufferDiff when positive and under 5 minutes.
		if req.BufferDiff > 0 && req.BufferDiff < 5*60*1000 {
			ts -= req.BufferDiff
		}
	}
	return
}
// startSessionHandlerWeb starts (or resumes) a web recording session.
// It validates the project key, platform and user agent, applies sampling,
// mints a flake session ID + auth token for new sessions, persists the
// session and publishes a SessionStart message, then responds with the token
// plus recording parameters (beacon size, compression, canvas settings).
func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0

	// Check request body
	if r.Body == nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)

	// Parse request body
	req := &StartSessionRequest{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}

	// Add tracker version to context
	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))

	// Handler's logic
	if req.ProjectKey == nil {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize)
		return
	}

	p, err := e.services.Projects.GetProjectByKey(*req.ProjectKey)
	if err != nil {
		// Distinguish "unknown project" (404) from backend failure (500).
		if postgres.IsNoRowsErr(err) {
			logErr := fmt.Errorf("project doesn't exist or is not active, key: %s", *req.ProjectKey)
			e.ResponseWithError(r.Context(), w, http.StatusNotFound, logErr, startTime, r.URL.Path, bodySize)
		} else {
			e.log.Error(r.Context(), "failed to get project by key: %s, err: %s", *req.ProjectKey, err)
			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, bodySize)
		}
		return
	}

	// Add projectID to context
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))

	// Check if the project supports mobile sessions
	if !p.IsWeb() {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)
		return
	}

	ua := e.services.UaParser.ParseFromHTTPRequest(r)
	if ua == nil {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, fmt.Errorf("browser not recognized, user-agent: %s", r.Header.Get("User-Agent")), startTime, r.URL.Path, bodySize)
		return
	}
	geoInfo := e.ExtractGeoData(r)

	userUUID := uuid.GetUUID(req.UserUUID)
	tokenData, err := e.services.Tokenizer.Parse(req.Token)

	// A new session starts when the token is invalid/absent or the client
	// explicitly requested a reset.
	if err != nil || req.Reset { // Starting the new one
		dice := byte(rand.Intn(100))
		// Use condition rate if it's set
		if req.Condition != "" {
			rate, err := e.services.Conditions.GetRate(p.ProjectID, req.Condition, int(p.SampleRate))
			if err != nil {
				e.log.Warn(r.Context(), "can't get condition rate, condition: %s, err: %s", req.Condition, err)
			} else {
				p.SampleRate = byte(rate)
			}
		}
		if dice >= p.SampleRate {
			e.ResponseWithError(r.Context(), w, http.StatusForbidden, fmt.Errorf("capture rate miss, rate: %d", p.SampleRate), startTime, r.URL.Path, bodySize)
			return
		}

		startTimeMili := startTime.UnixMilli()
		sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili))
		if err != nil {
			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
			return
		}

		// Token expires after the project's maximum session duration; Delay
		// records the client/server clock offset at session start.
		expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
		tokenData = &token.TokenData{
			ID:      sessionID,
			Delay:   startTimeMili - req.Timestamp,
			ExpTime: expTime.UnixMilli(),
		}

		// Add sessionID to context
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID)))

		if recordSession(req) {
			sessionStart := &SessionStart{
				Timestamp:            getSessionTimestamp(req, startTimeMili),
				ProjectID:            uint64(p.ProjectID),
				TrackerVersion:       req.TrackerVersion,
				RevID:                req.RevID,
				UserUUID:             userUUID,
				UserAgent:            r.Header.Get("User-Agent"),
				UserOS:               ua.OS,
				UserOSVersion:        ua.OSVersion,
				UserBrowser:          ua.Browser,
				UserBrowserVersion:   ua.BrowserVersion,
				UserDevice:           ua.Device,
				UserDeviceType:       ua.DeviceType,
				UserCountry:          geoInfo.Pack(),
				UserDeviceMemorySize: req.DeviceMemory,
				UserDeviceHeapSize:   req.JsHeapSizeLimit,
				UserID:               req.UserID,
			}

			// Save sessionStart to db (failure is non-fatal, logged only)
			if err := e.services.Sessions.Add(&sessions.Session{
				SessionID:            sessionID,
				Platform:             "web",
				Timestamp:            sessionStart.Timestamp,
				Timezone:             req.Timezone,
				ProjectID:            uint32(sessionStart.ProjectID),
				TrackerVersion:       sessionStart.TrackerVersion,
				RevID:                sessionStart.RevID,
				UserUUID:             sessionStart.UserUUID,
				UserOS:               sessionStart.UserOS,
				UserOSVersion:        sessionStart.UserOSVersion,
				UserDevice:           sessionStart.UserDevice,
				UserCountry:          geoInfo.Country,
				UserState:            geoInfo.State,
				UserCity:             geoInfo.City,
				UserAgent:            sessionStart.UserAgent,
				UserBrowser:          sessionStart.UserBrowser,
				UserBrowserVersion:   sessionStart.UserBrowserVersion,
				UserDeviceType:       sessionStart.UserDeviceType,
				UserDeviceMemorySize: sessionStart.UserDeviceMemorySize,
				UserDeviceHeapSize:   sessionStart.UserDeviceHeapSize,
				UserID:               &sessionStart.UserID,
				ScreenWidth:          req.Width,
				ScreenHeight:         req.Height,
			}); err != nil {
				e.log.Warn(r.Context(), "can't insert sessionStart to DB: %s", err)
			}

			// Send sessionStart message to kafka
			if err := e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, sessionStart.Encode()); err != nil {
				e.log.Error(r.Context(), "can't send sessionStart to queue: %s", err)
			}
		}
	} else {
		// Resumed session: reuse the existing token's session ID.
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", tokenData.ID)))
	}

	// Save information about session beacon size
	e.addBeaconSize(tokenData.ID, p.BeaconSize)

	startResponse := &StartSessionResponse{
		Token:                e.services.Tokenizer.Compose(*tokenData),
		UserUUID:             userUUID,
		UserOS:               ua.OS,
		UserDevice:           ua.Device,
		UserBrowser:          ua.Browser,
		UserCountry:          geoInfo.Country,
		UserState:            geoInfo.State,
		UserCity:             geoInfo.City,
		SessionID:            strconv.FormatUint(tokenData.ID, 10),
		ProjectID:            strconv.FormatUint(uint64(p.ProjectID), 10),
		BeaconSizeLimit:      e.getBeaconSize(tokenData.ID),
		CompressionThreshold: e.getCompressionThreshold(),
		StartTimestamp:       int64(flakeid.ExtractTimestamp(tokenData.ID)),
		Delay:                tokenData.Delay,
		CanvasEnabled:        e.cfg.RecordCanvas,
		CanvasImageQuality:   e.cfg.CanvasQuality,
		CanvasFrameRate:      e.cfg.CanvasFps,
		Features:             e.features,
	}
	// Allow per-request tweaks to the response before sending (sibling helper).
	modifyResponse(req, startResponse)
	e.ResponseWithJSON(r.Context(), w, startResponse, startTime, r.URL.Path, bodySize)
}
// pushMessagesHandlerWeb accepts a batch of tracker messages for a web
// session and forwards the raw bytes to the raw-web topic. A just-expired
// token is tolerated for reading/producing, but the final response is still
// 401 so the client re-authenticates.
func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0

	// Get debug header with batch info
	if batch := r.URL.Query().Get("batch"); batch != "" {
		r = r.WithContext(context.WithValue(r.Context(), "batch", batch))
	}

	// Check authorization
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	tokenJustExpired := false
	if err != nil {
		if errors.Is(err, token.JUST_EXPIRED) {
			// Accept the batch but remember to answer 401 below.
			tokenJustExpired = true
		} else {
			e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
			return
		}
	}
	// NOTE(review): after a JUST_EXPIRED token, sessionData is assumed
	// non-nil below — confirm ParseFromHTTPRequest guarantees that.

	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}

	// Check request body
	if r.Body == nil {
		errCode := http.StatusBadRequest
		if tokenJustExpired {
			errCode = http.StatusUnauthorized
		}
		e.ResponseWithError(r.Context(), w, errCode, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	// Body limit is the per-session beacon size, not the JSON limit.
	bodyBytes, err := e.readBody(w, r, e.getBeaconSize(sessionData.ID))
	if err != nil {
		errCode := http.StatusRequestEntityTooLarge
		if tokenJustExpired {
			errCode = http.StatusUnauthorized
		}
		e.ResponseWithError(r.Context(), w, errCode, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)

	// Send processed messages to queue as array of bytes
	err = e.services.Producer.Produce(e.cfg.TopicRawWeb, sessionData.ID, bodyBytes)
	if err != nil {
		e.log.Error(r.Context(), "can't send messages batch to queue: %s", err)
		errCode := http.StatusInternalServerError
		if tokenJustExpired {
			errCode = http.StatusUnauthorized
		}
		e.ResponseWithError(r.Context(), w, errCode, errors.New("can't save message, try again"), startTime, r.URL.Path, bodySize)
		return
	}

	// Batch accepted, but force the client to refresh its expired token.
	if tokenJustExpired {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, errors.New("token expired"), startTime, r.URL.Path, bodySize)
		return
	}
	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
}
// notStartedHandlerWeb records a session that never started (e.g. blocked by
// DNT or sampling) as an "un-started session" row. It always answers 200 once
// the request itself is well-formed; a DB insert failure is only logged.
func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	if r.Body == nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)
	req := &NotStartedRequest{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add tracker version to context
	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
	// Handler's logic
	if req.ProjectKey == nil {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize)
		return
	}
	p, err := e.services.Projects.GetProjectByKey(*req.ProjectKey)
	if err != nil {
		if postgres.IsNoRowsErr(err) {
			logErr := fmt.Errorf("project doesn't exist or is not active, key: %s", *req.ProjectKey)
			e.ResponseWithError(r.Context(), w, http.StatusNotFound, logErr, startTime, r.URL.Path, bodySize)
		} else {
			e.log.Error(r.Context(), "can't find a project: %s", err)
			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, bodySize)
		}
		return
	}
	// Add projectID to context
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))
	// Unrecognized user agents are rejected outright.
	ua := e.services.UaParser.ParseFromHTTPRequest(r)
	if ua == nil {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, fmt.Errorf("browser not recognized, user-agent: %s", r.Header.Get("User-Agent")), startTime, r.URL.Path, bodySize)
		return
	}
	geoInfo := e.ExtractGeoData(r)
	err = e.services.Sessions.AddUnStarted(&sessions.UnStartedSession{
		ProjectKey:         *req.ProjectKey,
		TrackerVersion:     req.TrackerVersion,
		DoNotTrack:         req.DoNotTrack,
		Platform:           "web",
		UserAgent:          r.Header.Get("User-Agent"),
		UserOS:             ua.OS,
		UserOSVersion:      ua.OSVersion,
		UserBrowser:        ua.Browser,
		UserBrowserVersion: ua.BrowserVersion,
		UserDevice:         ua.Device,
		UserDeviceType:     ua.DeviceType,
		UserCountry:        geoInfo.Country,
		UserState:          geoInfo.State,
		UserCity:           geoInfo.City,
	})
	if err != nil {
		e.log.Warn(r.Context(), "can't insert un-started session: %s", err)
	}
	// response ok anyway
	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
}
// featureFlagsHandlerWeb computes the set of feature flags applicable to the
// calling session and returns them as JSON. Requires a valid session token.
func (e *Router) featureFlagsHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	if r.Body == nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)
	// Parse request body
	req := &featureflags.FeatureFlagsRequest{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	computedFlags, err := e.services.FeatureFlags.ComputeFlagsForSession(req)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	resp := &featureflags.FeatureFlagsResponse{
		Flags: computedFlags,
	}
	e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
// getUXTestInfo returns the usability-test definition referenced by the {id}
// route variable, after verifying the test belongs to the caller's project.
func (e *Router) getUXTestInfo(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	sess, err := e.services.Sessions.Get(sessionData.ID)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add projectID to context
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", sess.ProjectID)))
	// Get taskID
	vars := mux.Vars(r)
	id := vars["id"]
	// Get task info
	info, err := e.services.UXTesting.GetInfo(id)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Cross-project access is forbidden.
	if sess.ProjectID != info.ProjectID {
		e.ResponseWithError(r.Context(), w, http.StatusForbidden, errors.New("project mismatch"), startTime, r.URL.Path, bodySize)
		return
	}
	type TaskInfoResponse struct {
		Task *uxtesting.UXTestInfo `json:"test"`
	}
	e.ResponseWithJSON(r.Context(), w, &TaskInfoResponse{Task: info}, startTime, r.URL.Path, bodySize)
}
// sendUXTestSignal stores a test-level usability signal sent by the tracker,
// stamping it with the authenticated session ID.
func (e *Router) sendUXTestSignal(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)
	// Parse request body
	req := &uxtesting.TestSignal{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	// The session ID comes from the token, never from the client payload.
	req.SessionID = sessionData.ID
	// Save test signal
	if err := e.services.UXTesting.SetTestSignal(req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
}
// sendUXTaskSignal stores a task-level usability signal sent by the tracker,
// stamping it with the authenticated session ID. Mirrors sendUXTestSignal.
func (e *Router) sendUXTaskSignal(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)
	// Parse request body
	req := &uxtesting.TaskSignal{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	// The session ID comes from the token, never from the client payload.
	req.SessionID = sessionData.ID
	// Save test signal
	if err := e.services.UXTesting.SetTaskSignal(req); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
}
// getUXUploadUrl returns a pre-signed object-storage URL the client can use
// to upload its webcam recording for a usability test. The object key is
// derived from the session ID.
func (e *Router) getUXUploadUrl(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	// One fixed recording object per session.
	key := fmt.Sprintf("%d/ux_webcam_record.webm", sessionData.ID)
	url, err := e.services.ObjStorage.GetPreSignedUploadUrl(key)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	type UrlResponse struct {
		URL string `json:"url"`
	}
	e.ResponseWithJSON(r.Context(), w, &UrlResponse{URL: url}, startTime, r.URL.Path, bodySize)
}
// ScreenshotMessage is the JSON envelope published to the canvas-images topic
// for each uploaded screenshot file.
type ScreenshotMessage struct {
	Name string // sanitized original file name
	Data []byte // raw file content
}
// imagesUploaderHandlerWeb accepts a multipart upload of canvas screenshots
// for an authorized session and publishes each file to the canvas-images
// topic. Per-file marshal/produce failures are logged and skipped; the
// request as a whole still succeeds.
func (e *Router) imagesUploaderHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil { // Should accept expired token?
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.services.Sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	if r.Body == nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
		return
	}
	r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
	defer r.Body.Close()
	// Parse the multipart form. Sentinel errors are matched with errors.Is
	// instead of == so wrapped errors are still recognized.
	err = r.ParseMultipartForm(10 << 20) // Max upload size 10 MB
	if errors.Is(err, http.ErrNotMultipart) || errors.Is(err, http.ErrMissingBoundary) {
		e.ResponseWithError(r.Context(), w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
		return
	} else if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
		return
	}
	// Iterate over uploaded files
	for _, fileHeaderList := range r.MultipartForm.File {
		for _, fileHeader := range fileHeaderList {
			// Read one part inside a closure so defer closes the file on
			// every path (the previous version duplicated manual Close calls).
			fileBytes, err := func() ([]byte, error) {
				file, err := fileHeader.Open()
				if err != nil {
					return nil, err
				}
				defer file.Close()
				return io.ReadAll(file)
			}()
			if err != nil {
				e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
				return
			}
			fileName := util.SafeString(fileHeader.Filename)
			// Create a message to send to Kafka
			msg := ScreenshotMessage{
				Name: fileName,
				Data: fileBytes,
			}
			data, err := json.Marshal(&msg)
			if err != nil {
				e.log.Warn(r.Context(), "can't marshal screenshot message, err: %s", err)
				continue
			}
			// Send the message to queue
			if err := e.services.Producer.Produce(e.cfg.TopicCanvasImages, sessionData.ID, data); err != nil {
				e.log.Warn(r.Context(), "can't send screenshot message to queue, err: %s", err)
			}
		}
	}
	e.ResponseOK(r.Context(), w, startTime, r.URL.Path, 0)
}
// getTags authorizes the session, resolves its project, and returns the
// project's configured tags as JSON.
func (e *Router) getTags(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	sessInfo, err := e.services.Sessions.Get(sessionData.ID)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add projectID to context. sessionID was already stored in the context
	// right after token parsing, so the previous duplicate assignment is gone.
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", sessInfo.ProjectID)))
	// Get tags
	tags, err := e.services.Tags.Get(sessInfo.ProjectID)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	type UrlResponse struct {
		Tags interface{} `json:"tags"`
	}
	e.ResponseWithJSON(r.Context(), w, &UrlResponse{Tags: tags}, startTime, r.URL.Path, bodySize)
}

View file

@ -1,41 +0,0 @@
package router
import (
"io"
"net/http"
"time"
gzip "github.com/klauspost/pgzip"
)
// pushMessages reads an optionally gzip-compressed request body (bounded by
// the configured beacon size limit) and publishes it verbatim to topicName,
// keyed by sessionID.
func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
	start := time.Now()
	limited := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
	defer limited.Close()
	var reader io.ReadCloser = limited
	if r.Header.Get("Content-Encoding") == "gzip" {
		gzReader, err := gzip.NewReader(limited)
		if err != nil {
			e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
			return
		}
		defer gzReader.Close()
		reader = gzReader
	}
	payload, err := io.ReadAll(reader)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
		return
	}
	if err := e.services.Producer.Produce(topicName, sessionID, payload); err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
		return
	}
	w.WriteHeader(http.StatusOK)
	e.log.Info(r.Context(), "response ok")
}

View file

@ -1,50 +0,0 @@
package router
import (
"context"
"encoding/json"
"net/http"
"time"
metrics "openreplay/backend/pkg/metrics/http"
)
// recordMetrics reports request size (only when known), total request count,
// and request duration in milliseconds for the given url/status-code pair.
func recordMetrics(requestStart time.Time, url string, code, bodySize int) {
	if bodySize > 0 {
		metrics.RecordRequestSize(float64(bodySize), url, code)
	}
	metrics.IncreaseTotalRequests()
	// time.Since is the idiomatic equivalent of time.Now().Sub(requestStart).
	metrics.RecordRequestDuration(float64(time.Since(requestStart).Milliseconds()), url, code)
}
// ResponseOK writes an empty 200 response, logs it, and records request metrics.
func (e *Router) ResponseOK(ctx context.Context, w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
	w.WriteHeader(http.StatusOK)
	e.log.Info(ctx, "response ok")
	recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// ResponseWithJSON serializes res as JSON, writes it with a 200 status, and
// records request metrics. A serialization failure now produces a 500 instead
// of the previous silent empty 200 body.
func (e *Router) ResponseWithJSON(ctx context.Context, w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
	e.log.Info(ctx, "response ok")
	body, err := json.Marshal(res)
	if err != nil {
		e.log.Error(ctx, "can't marshal response: %s", err)
		w.WriteHeader(http.StatusInternalServerError)
		recordMetrics(requestStart, url, http.StatusInternalServerError, bodySize)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(body)
	recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// response is the JSON error payload returned by ResponseWithError.
type response struct {
	Error string `json:"error"`
}
// ResponseWithError writes a JSON error body {"error": "..."} with the given
// status code and records request metrics. A nil err is tolerated (previously
// err.Error() would panic) and the Content-Type is now set explicitly, matching
// ResponseWithJSON.
func (e *Router) ResponseWithError(ctx context.Context, w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
	e.log.Error(ctx, "response error, code: %d, error: %s", code, err)
	msg := "unknown error"
	if err != nil {
		msg = err.Error()
	}
	body, jsonErr := json.Marshal(&response{msg})
	if jsonErr != nil {
		e.log.Error(ctx, "can't marshal response: %s", jsonErr)
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	w.Write(body)
	recordMetrics(requestStart, url, code, bodySize)
}

View file

@ -1,178 +0,0 @@
package router
import (
"fmt"
"github.com/docker/distribution/context"
"github.com/tomasen/realip"
"net"
"net/http"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/pkg/logger"
"sync"
"time"
"github.com/gorilla/mux"
http3 "openreplay/backend/internal/config/http"
http2 "openreplay/backend/internal/http/services"
"openreplay/backend/internal/http/util"
)
// BeaconSize is a cache entry holding a session's beacon size limit and the
// time it was last written or read (used for expiry in clearBeaconSizes).
type BeaconSize struct {
	size int64
	time time.Time
}
// Router wires all ingest HTTP endpoints to their handlers and holds the
// shared services, config, and the per-session beacon-size cache.
type Router struct {
	log                  logger.Logger
	cfg                  *http3.Config
	router               *mux.Router
	mutex                *sync.RWMutex          // guards beaconSizeCache
	services             *http2.ServicesBuilder
	beaconSizeCache      map[uint64]*BeaconSize // Cache for session's beaconSize
	compressionThreshold int64
	features             map[string]bool // feature toggles exposed to trackers
}
// NewRouter validates its dependencies, builds a fully wired ingest router,
// and starts the background janitor for the beacon-size cache.
func NewRouter(cfg *http3.Config, log logger.Logger, services *http2.ServicesBuilder) (*Router, error) {
	if cfg == nil {
		return nil, fmt.Errorf("config is empty")
	}
	if services == nil {
		return nil, fmt.Errorf("services is empty")
	}
	if log == nil {
		return nil, fmt.Errorf("logger is empty")
	}
	router := &Router{
		log:                  log,
		cfg:                  cfg,
		mutex:                &sync.RWMutex{},
		services:             services,
		beaconSizeCache:      make(map[uint64]*BeaconSize),
		compressionThreshold: cfg.CompressionThreshold,
		features: map[string]bool{
			"feature-flags":  cfg.IsFeatureFlagEnabled,
			"usability-test": cfg.IsUsabilityTestEnabled,
		},
	}
	router.init()
	go router.clearBeaconSizes()
	return router, nil
}
// addBeaconSize caches a session's custom beacon size limit; non-positive
// sizes are ignored so the global default stays in effect.
func (e *Router) addBeaconSize(sessionID uint64, size int64) {
	if size <= 0 {
		return
	}
	entry := &BeaconSize{
		size: size,
		time: time.Now(),
	}
	e.mutex.Lock()
	defer e.mutex.Unlock()
	e.beaconSizeCache[sessionID] = entry
}
// getBeaconSize returns the cached beacon size limit for the session,
// refreshing the entry's last-used timestamp, or falls back to the global
// limit when no entry exists.
func (e *Router) getBeaconSize(sessionID uint64) int64 {
	// Must take the write lock: the lookup also mutates beaconSize.time.
	// The previous RLock let concurrent readers race on that write.
	e.mutex.Lock()
	defer e.mutex.Unlock()
	if beaconSize, ok := e.beaconSizeCache[sessionID]; ok {
		beaconSize.time = time.Now()
		return beaconSize.size
	}
	return e.cfg.BeaconSizeLimit
}
// getCompressionThreshold returns the payload size above which trackers are
// told to compress their batches (copied from config at construction time).
func (e *Router) getCompressionThreshold() int64 {
	return e.compressionThreshold
}
// clearBeaconSizes evicts beacon-size cache entries not touched for more than
// three minutes, sweeping every two minutes. Runs forever in its own
// goroutine (started by NewRouter).
// NOTE(review): there is no shutdown signal for this loop; the goroutine
// lives for the process lifetime — confirm that is intended.
func (e *Router) clearBeaconSizes() {
	for {
		time.Sleep(time.Minute * 2)
		now := time.Now()
		e.mutex.Lock()
		for sid, bs := range e.beaconSizeCache {
			if now.Sub(bs.time) > time.Minute*3 {
				delete(e.beaconSizeCache, sid)
			}
		}
		e.mutex.Unlock()
	}
}
// ExtractGeoData resolves the client's real IP (honoring proxy headers) to a
// geo record. Parse failures are only logged.
// NOTE(review): callers dereference the returned record's fields; this assumes
// GeoIP.Parse returns a usable (non-nil) record even on error — confirm.
func (e *Router) ExtractGeoData(r *http.Request) *geoip.GeoRecord {
	ip := net.ParseIP(realip.FromRequest(r))
	geoRec, err := e.services.GeoIP.Parse(ip)
	if err != nil {
		e.log.Warn(r.Context(), "failed to parse geo data: %v", err)
	}
	return geoRec
}
// init registers all ingest routes (each both bare and under the /ingest
// prefix), POST routes and GET routes separately, plus the CORS middleware.
func (e *Router) init() {
	e.router = mux.NewRouter()
	// Root path
	e.router.HandleFunc("/", e.root)
	// POST endpoints (OPTIONS allowed for CORS preflight).
	handlers := map[string]func(http.ResponseWriter, *http.Request){
		"/v1/web/not-started":      e.notStartedHandlerWeb,
		"/v1/web/start":            e.startSessionHandlerWeb,
		"/v1/web/i":                e.pushMessagesHandlerWeb,
		"/v1/web/feature-flags":    e.featureFlagsHandlerWeb,
		"/v1/web/images":           e.imagesUploaderHandlerWeb,
		"/v1/mobile/start":         e.startMobileSessionHandler,
		"/v1/mobile/i":             e.pushMobileMessagesHandler,
		"/v1/mobile/late":          e.pushMobileLateMessagesHandler,
		"/v1/mobile/images":        e.mobileImagesUploadHandler,
		"/v1/web/uxt/signals/test": e.sendUXTestSignal,
		"/v1/web/uxt/signals/task": e.sendUXTaskSignal,
	}
	// GET endpoints.
	getHandlers := map[string]func(http.ResponseWriter, *http.Request){
		"/v1/web/uxt/test/{id}":            e.getUXTestInfo,
		"/v1/web/uxt/upload-url":           e.getUXUploadUrl,
		"/v1/web/tags":                     e.getTags,
		"/v1/web/conditions/{project}":     e.getConditions,
		"/v1/mobile/conditions/{project}":  e.getConditions,
	}
	// Every route is also served under /ingest for deployments behind a
	// path-routing proxy.
	prefix := "/ingest"
	for path, handler := range handlers {
		e.router.HandleFunc(path, handler).Methods("POST", "OPTIONS")
		e.router.HandleFunc(prefix+path, handler).Methods("POST", "OPTIONS")
	}
	for path, handler := range getHandlers {
		e.router.HandleFunc(path, handler).Methods("GET", "OPTIONS")
		e.router.HandleFunc(prefix+path, handler).Methods("GET", "OPTIONS")
	}
	// CORS middleware
	e.router.Use(e.corsMiddleware)
}
// root is a trivial liveness endpoint: it answers 200 with no body.
func (e *Router) root(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}
// corsMiddleware optionally emits permissive CORS headers, short-circuits
// OPTIONS preflight requests with a cached 200, and stores the HTTP method
// and sanitized URL in the request context for downstream logging.
func (e *Router) corsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if e.cfg.UseAccessControlHeaders {
			// Prepare headers for preflight requests
			w.Header().Set("Access-Control-Allow-Origin", "*")
			w.Header().Set("Access-Control-Allow-Methods", "POST,GET")
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization,Content-Encoding")
		}
		if r.Method == http.MethodOptions {
			// Let browsers cache the preflight result for a day.
			w.Header().Set("Cache-Control", "max-age=86400")
			w.WriteHeader(http.StatusOK)
			return
		}
		r = r.WithContext(context.WithValues(r.Context(), map[string]interface{}{"httpMethod": r.Method, "url": util.SafeString(r.URL.Path)}))
		// Serve request
		next.ServeHTTP(w, r)
	})
}
// GetHandler exposes the configured mux router as an http.Handler.
func (e *Router) GetHandler() http.Handler {
	return e.router
}

View file

@ -1,43 +0,0 @@
package server
import (
"context"
"errors"
"fmt"
"golang.org/x/net/http2"
"net/http"
"time"
)
// Server is a thin wrapper around http.Server with HTTP/2 enabled.
type Server struct {
	server *http.Server
}
// New builds an HTTP server with HTTP/2 support and identical read/write
// timeouts. host may be empty to bind on all interfaces; port and handler are
// required and timeout must be positive.
func New(handler http.Handler, host, port string, timeout time.Duration) (*Server, error) {
	switch {
	case port == "":
		return nil, errors.New("empty server port")
	case handler == nil:
		return nil, errors.New("empty handler")
	case timeout < 1:
		return nil, fmt.Errorf("invalid timeout %d", timeout)
	}
	server := &http.Server{
		Addr:         fmt.Sprintf("%s:%s", host, port),
		Handler:      handler,
		ReadTimeout:  timeout,
		WriteTimeout: timeout,
	}
	// ConfigureServer can fail (e.g. bad TLS config); the error was silently
	// discarded before.
	if err := http2.ConfigureServer(server, nil); err != nil {
		return nil, err
	}
	return &Server{
		server: server,
	}, nil
}
// Start blocks serving requests until the server stops or fails; it returns
// http.ErrServerClosed after a graceful Stop.
func (s *Server) Start() error {
	return s.server.ListenAndServe()
}
// Stop gracefully shuts the server down, waiting indefinitely for in-flight
// requests (background context = no deadline). The shutdown error is ignored.
func (s *Server) Stop() {
	s.server.Shutdown(context.Background())
}

View file

@ -5,44 +5,44 @@ import (
"openreplay/backend/internal/http/geoip"
"openreplay/backend/internal/http/uaparser"
"openreplay/backend/pkg/conditions"
conditionsAPI "openreplay/backend/pkg/conditions/api"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/featureflags"
featureflagsAPI "openreplay/backend/pkg/featureflags/api"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/sessions"
mobilesessions "openreplay/backend/pkg/sessions/api/mobile"
websessions "openreplay/backend/pkg/sessions/api/web"
"openreplay/backend/pkg/tags"
tagsAPI "openreplay/backend/pkg/tags/api"
"openreplay/backend/pkg/token"
"openreplay/backend/pkg/uxtesting"
uxtestingAPI "openreplay/backend/pkg/uxtesting/api"
)
type ServicesBuilder struct {
Projects projects.Projects
Sessions sessions.Sessions
FeatureFlags featureflags.FeatureFlags
Producer types.Producer
Flaker *flakeid.Flaker
UaParser *uaparser.UAParser
GeoIP geoip.GeoParser
Tokenizer *token.Tokenizer
ObjStorage objectstorage.ObjectStorage
UXTesting uxtesting.UXTesting
Tags tags.Tags
Conditions conditions.Conditions
WebAPI api.Handlers
MobileAPI api.Handlers
ConditionsAPI api.Handlers
FeatureFlagsAPI api.Handlers
TagsAPI api.Handlers
UxTestsAPI api.Handlers
}
func New(log logger.Logger, cfg *http.Config, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
projs := projects.New(log, pgconn, redis)
// ObjectStorage client to generate pre-signed upload urls
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
return nil, err
}
geoModule, err := geoip.New(cfg.MaxMinDBFile)
geoModule, err := geoip.New(log, cfg.MaxMinDBFile)
if err != nil {
return nil, err
}
@ -50,18 +50,32 @@ func New(log logger.Logger, cfg *http.Config, producer types.Producer, pgconn po
if err != nil {
return nil, err
}
return &ServicesBuilder{
Projects: projs,
Sessions: sessions.New(log, pgconn, projs, redis),
FeatureFlags: featureflags.New(pgconn),
Producer: producer,
Tokenizer: token.NewTokenizer(cfg.TokenSecret),
UaParser: uaModule,
GeoIP: geoModule,
Flaker: flakeid.NewFlaker(cfg.WorkerID),
ObjStorage: objStore,
UXTesting: uxtesting.New(pgconn),
Tags: tags.New(log, pgconn),
Conditions: conditions.New(pgconn),
}, nil
tokenizer := token.NewTokenizer(cfg.TokenSecret)
conditions := conditions.New(pgconn)
flaker := flakeid.NewFlaker(cfg.WorkerID)
sessions := sessions.New(log, pgconn, projs, redis)
featureFlags := featureflags.New(pgconn)
tags := tags.New(log, pgconn)
uxTesting := uxtesting.New(pgconn)
responser := api.NewResponser(metrics)
builder := &ServicesBuilder{}
if builder.WebAPI, err = websessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil {
return nil, err
}
if builder.MobileAPI, err = mobilesessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil {
return nil, err
}
if builder.ConditionsAPI, err = conditionsAPI.NewHandlers(log, responser, tokenizer, conditions); err != nil {
return nil, err
}
if builder.FeatureFlagsAPI, err = featureflagsAPI.NewHandlers(log, responser, cfg.JsonSizeLimit, tokenizer, sessions, featureFlags); err != nil {
return nil, err
}
if builder.TagsAPI, err = tagsAPI.NewHandlers(log, responser, tokenizer, sessions, tags); err != nil {
return nil, err
}
if builder.UxTestsAPI, err = uxtestingAPI.NewHandlers(log, responser, cfg.JsonSizeLimit, tokenizer, sessions, uxTesting, objStore); err != nil {
return nil, err
}
return builder, nil
}

View file

@ -0,0 +1,34 @@
package api
import (
"net/http"
"time"
"openreplay/backend/pkg/conditions"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/token"
)
// handlersImpl serves the conditions endpoints.
type handlersImpl struct {
	log       logger.Logger
	responser *api.Responser
}
// NewHandlers builds the conditions HTTP handlers.
// NOTE(review): tokenizer and conditions are accepted but never stored —
// presumably placeholders until getConditions is implemented; confirm.
func NewHandlers(log logger.Logger, responser *api.Responser, tokenizer *token.Tokenizer, conditions conditions.Conditions) (api.Handlers, error) {
	return &handlersImpl{
		log:       log,
		responser: responser,
	}, nil
}
// GetAll lists the route descriptions served by this handler set.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/web/conditions/{project}", e.getConditions, "GET"},
		{"/v1/mobile/conditions/{project}", e.getConditions, "GET"},
	}
}
// getConditions is a stub: it always answers 501 Not Implemented.
// NOTE(review): a nil error is passed to ResponseWithError — verify the
// responser tolerates nil errors.
func (e *handlersImpl) getConditions(w http.ResponseWriter, r *http.Request) {
	e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotImplemented, nil, time.Now(), r.URL.Path, 0)
}

View file

@ -4,9 +4,9 @@ import (
"bytes"
"errors"
"fmt"
"openreplay/backend/pkg/db/postgres/pool"
"time"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/metrics/database"
)

View file

@ -0,0 +1,92 @@
package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
"openreplay/backend/pkg/featureflags"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/token"
)
// handlersImpl serves the feature-flags endpoint for web trackers.
type handlersImpl struct {
	log           logger.Logger
	responser     *api.Responser
	jsonSizeLimit int64 // max accepted request body size in bytes
	tokenizer     *token.Tokenizer
	sessions      sessions.Sessions
	featureFlags  featureflags.FeatureFlags
}
// NewHandlers builds the feature-flags HTTP handlers with their dependencies.
func NewHandlers(log logger.Logger, responser *api.Responser, jsonSizeLimit int64, tokenizer *token.Tokenizer, sessions sessions.Sessions,
	featureFlags featureflags.FeatureFlags) (api.Handlers, error) {
	return &handlersImpl{
		log:           log,
		responser:     responser,
		jsonSizeLimit: jsonSizeLimit,
		tokenizer:     tokenizer,
		sessions:      sessions,
		featureFlags:  featureFlags,
	}, nil
}
// GetAll lists the route descriptions served by this handler set.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/web/feature-flags", e.featureFlagsHandlerWeb, "POST"},
	}
}
// featureFlagsHandlerWeb computes the feature flags applicable to the calling
// session and returns them as JSON. Requires a valid (non-expired) token.
func (e *handlersImpl) featureFlagsHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	if r.Body == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	// Body may be gzip-compressed; size is capped by jsonSizeLimit.
	bodyBytes, err := api.ReadCompressedBody(e.log, w, r, e.jsonSizeLimit)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)
	// Parse request body
	req := &featureflags.FeatureFlagsRequest{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	computedFlags, err := e.featureFlags.ComputeFlagsForSession(req)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	resp := &featureflags.FeatureFlagsResponse{
		Flags: computedFlags,
	}
	e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}

View file

@ -0,0 +1,206 @@
package api
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
"strings"
"time"
"github.com/gorilla/mux"
integrationsCfg "openreplay/backend/internal/config/integrations"
"openreplay/backend/pkg/integrations/service"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
)
// handlersImpl serves the integrations CRUD endpoints.
type handlersImpl struct {
	log           logger.Logger
	responser     *api.Responser
	integrations  service.Service
	jsonSizeLimit int64 // max accepted request body size in bytes
}
// NewHandlers builds the integrations HTTP handlers with their dependencies.
func NewHandlers(log logger.Logger, cfg *integrationsCfg.Config, responser *api.Responser, integrations service.Service) (api.Handlers, error) {
	return &handlersImpl{
		log:           log,
		responser:     responser,
		integrations:  integrations,
		jsonSizeLimit: cfg.JsonSizeLimit,
	}, nil
}
// GetAll lists the route descriptions served by this handler set (one path,
// four methods, plus the per-session data endpoint).
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/integrations/{name}/{project}", e.createIntegration, "POST"},
		{"/v1/integrations/{name}/{project}", e.getIntegration, "GET"},
		{"/v1/integrations/{name}/{project}", e.updateIntegration, "PATCH"},
		{"/v1/integrations/{name}/{project}", e.deleteIntegration, "DELETE"},
		{"/v1/integrations/{name}/{project}/data/{session}", e.getIntegrationData, "GET"},
	}
}
// getIntegrationsArgs extracts and validates the {name} and {project} route
// variables, returning the integration name and the numeric project id.
func getIntegrationsArgs(r *http.Request) (string, uint64, error) {
	vars := mux.Vars(r)
	name, project := vars["name"], vars["project"]
	if name == "" {
		return "", 0, fmt.Errorf("empty integration name")
	}
	if project == "" {
		return "", 0, fmt.Errorf("project id is empty")
	}
	projID, err := strconv.ParseUint(project, 10, 64)
	if err != nil || projID == 0 {
		return "", 0, fmt.Errorf("invalid project id")
	}
	return name, projID, nil
}
// getIntegrationSession extracts and validates the {session} route variable,
// returning the numeric session id.
func getIntegrationSession(r *http.Request) (uint64, error) {
	session := mux.Vars(r)["session"]
	if session == "" {
		return 0, fmt.Errorf("session id is empty")
	}
	sessID, err := strconv.ParseUint(session, 10, 64)
	if err != nil || sessID == 0 {
		return 0, fmt.Errorf("invalid session id")
	}
	return sessID, nil
}
// IntegrationRequest is the JSON payload of create/update integration
// calls; provider-specific settings arrive as a flat string map.
type IntegrationRequest struct {
    IntegrationData map[string]string `json:"data"`
}
// createIntegration handles POST /v1/integrations/{name}/{project}:
// it parses the JSON payload and registers a new integration.
func (e *handlersImpl) createIntegration(w http.ResponseWriter, r *http.Request) {
    start := time.Now()
    body, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, start, r.URL.Path, 0)
        return
    }
    size := len(body)
    name, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, size)
        return
    }
    req := &IntegrationRequest{}
    if err = json.Unmarshal(body, req); err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, size)
        return
    }
    if err = e.integrations.AddIntegration(project, name, req.IntegrationData); err != nil {
        // Validation failures are the caller's fault; anything else is ours.
        code := http.StatusInternalServerError
        if strings.Contains(err.Error(), "failed to validate") {
            code = http.StatusUnprocessableEntity
        }
        e.responser.ResponseWithError(e.log, r.Context(), w, code, err, start, r.URL.Path, size)
        return
    }
    e.responser.ResponseOK(e.log, r.Context(), w, start, r.URL.Path, size)
}
// getIntegration handles GET /v1/integrations/{name}/{project} and
// returns the stored parameters of the requested integration.
func (e *handlersImpl) getIntegration(w http.ResponseWriter, r *http.Request) {
    start := time.Now()
    name, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, 0)
        return
    }
    params, err := e.integrations.GetIntegration(project, name)
    if err != nil {
        // A missing row maps to 404; everything else is a server failure.
        code := http.StatusInternalServerError
        if strings.Contains(err.Error(), "no rows in result set") {
            code = http.StatusNotFound
        }
        e.responser.ResponseWithError(e.log, r.Context(), w, code, err, start, r.URL.Path, 0)
        return
    }
    e.responser.ResponseWithJSON(e.log, r.Context(), w, params, start, r.URL.Path, 0)
}
// updateIntegration handles PATCH /v1/integrations/{name}/{project}:
// it replaces the stored settings of an existing integration.
func (e *handlersImpl) updateIntegration(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    bodySize := 0
    bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
        return
    }
    bodySize = len(bodyBytes)
    integration, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    req := &IntegrationRequest{}
    if err := json.Unmarshal(bodyBytes, req); err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    if err := e.integrations.UpdateIntegration(project, integration, req.IntegrationData); err != nil {
        // Classify service errors exactly like createIntegration: validation
        // problems are the client's (422), anything else is internal (500).
        // The previous blanket 400 hid genuine server-side failures.
        if strings.Contains(err.Error(), "failed to validate") {
            e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnprocessableEntity, err, startTime, r.URL.Path, bodySize)
        } else {
            e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
        }
        return
    }
    e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, bodySize)
}
// deleteIntegration handles DELETE /v1/integrations/{name}/{project}.
func (e *handlersImpl) deleteIntegration(w http.ResponseWriter, r *http.Request) {
    start := time.Now()
    name, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, 0)
        return
    }
    if err = e.integrations.DeleteIntegration(project, name); err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
        return
    }
    e.responser.ResponseOK(e.log, r.Context(), w, start, r.URL.Path, 0)
}
// getIntegrationData handles GET .../data/{session}: it returns a URL
// pointing at the integration's data for one session.
func (e *handlersImpl) getIntegrationData(w http.ResponseWriter, r *http.Request) {
    start := time.Now()
    name, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, 0)
        return
    }
    session, err := getIntegrationSession(r)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, 0)
        return
    }
    dataURL, err := e.integrations.GetSessionDataURL(project, name, session)
    if err != nil {
        e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, 0)
        return
    }
    e.responser.ResponseWithJSON(e.log, r.Context(), w, map[string]string{"url": dataURL}, start, r.URL.Path, 0)
}

View file

@ -1,36 +1,51 @@
package data_integration
package integrations
import (
"openreplay/backend/pkg/integrations/service"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server/tracer"
"time"
"openreplay/backend/internal/config/integrations"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/flakeid"
integrationsAPI "openreplay/backend/pkg/integrations/api"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/spot/auth"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/auth"
"openreplay/backend/pkg/server/limiter"
)
type ServiceBuilder struct {
Flaker *flakeid.Flaker
ObjStorage objectstorage.ObjectStorage
Auth auth.Auth
Integrator Service
Auth auth.Auth
RateLimiter *limiter.UserRateLimiter
AuditTrail tracer.Tracer
IntegrationsAPI api.Handlers
}
func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, pgconn pool.Pool) (*ServiceBuilder, error) {
func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, pgconn pool.Pool) (*ServiceBuilder, error) {
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
return nil, err
}
integrator, err := NewService(log, pgconn, objStore)
integrator, err := service.NewService(log, pgconn, objStore)
if err != nil {
return nil, err
}
flaker := flakeid.NewFlaker(cfg.WorkerID)
return &ServiceBuilder{
Flaker: flaker,
ObjStorage: objStore,
Auth: auth.NewAuth(log, cfg.JWTSecret, "", pgconn),
Integrator: integrator,
}, nil
responser := api.NewResponser(webMetrics)
handlers, err := integrationsAPI.NewHandlers(log, cfg, responser, integrator)
if err != nil {
return nil, err
}
auditrail, err := tracer.NewTracer(log, pgconn)
if err != nil {
return nil, err
}
builder := &ServiceBuilder{
Auth: auth.NewAuth(log, cfg.JWTSecret, "", pgconn, nil),
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
AuditTrail: auditrail,
IntegrationsAPI: handlers,
}
return builder, nil
}

View file

@ -1,233 +0,0 @@
package data_integration
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
"github.com/gorilla/mux"
metrics "openreplay/backend/pkg/metrics/heuristics"
)
// getIntegrationsArgs extracts and validates the {name} and {project}
// path variables from the request.
func getIntegrationsArgs(r *http.Request) (string, uint64, error) {
    vars := mux.Vars(r)
    name := vars["name"]
    if name == "" {
        return "", 0, fmt.Errorf("empty integration name")
    }
    project := vars["project"]
    if project == "" {
        return "", 0, fmt.Errorf("project id is empty")
    }
    // ParseUint never returns a negative value, so only zero is invalid.
    projID, err := strconv.ParseUint(project, 10, 64)
    if err != nil || projID == 0 {
        return "", 0, fmt.Errorf("invalid project id")
    }
    return name, projID, nil
}
// getIntegrationSession extracts and validates the {session} path
// variable, returning the numeric session id.
func getIntegrationSession(r *http.Request) (uint64, error) {
    vars := mux.Vars(r)
    session := vars["session"]
    if session == "" {
        return 0, fmt.Errorf("session id is empty")
    }
    // Unsigned result: `<= 0` is equivalent to `== 0`, so write the latter.
    sessID, err := strconv.ParseUint(session, 10, 64)
    if err != nil || sessID == 0 {
        return 0, fmt.Errorf("invalid session id")
    }
    return sessID, nil
}
// IntegrationRequest is the JSON payload of create/update integration
// calls; provider-specific settings arrive as a flat string map.
type IntegrationRequest struct {
    IntegrationData map[string]string `json:"data"`
}
// createIntegration handles POST /v1/integrations/{name}/{project}:
// reads the JSON payload and registers a new integration for the project.
func (e *Router) createIntegration(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    bodySize := 0
    bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
        return
    }
    bodySize = len(bodyBytes)
    integration, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    req := &IntegrationRequest{}
    if err := json.Unmarshal(bodyBytes, req); err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    if err := e.services.Integrator.AddIntegration(project, integration, req.IntegrationData); err != nil {
        // Validation errors are the client's fault (422); the rest are 500.
        if strings.Contains(err.Error(), "failed to validate") {
            e.ResponseWithError(r.Context(), w, http.StatusUnprocessableEntity, err, startTime, r.URL.Path, bodySize)
        } else {
            e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
        }
        return
    }
    e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
}
// getIntegration returns the stored parameters of one integration.
// A missing row maps to 404; other storage failures to 500.
func (e *Router) getIntegration(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    bodySize := 0
    integration, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    intParams, err := e.services.Integrator.GetIntegration(project, integration)
    if err != nil {
        // "no rows in result set" is the pgx not-found sentinel text.
        if strings.Contains(err.Error(), "no rows in result set") {
            e.ResponseWithError(r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
        } else {
            e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
        }
        return
    }
    e.ResponseWithJSON(r.Context(), w, intParams, startTime, r.URL.Path, bodySize)
}
// updateIntegration handles PATCH /v1/integrations/{name}/{project}:
// replaces the stored settings of an existing integration.
// NOTE(review): all service errors are reported as 400 here, while
// createIntegration distinguishes 422/500 — looks inconsistent.
func (e *Router) updateIntegration(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    bodySize := 0
    bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
        return
    }
    bodySize = len(bodyBytes)
    integration, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    req := &IntegrationRequest{}
    if err := json.Unmarshal(bodyBytes, req); err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    if err := e.services.Integrator.UpdateIntegration(project, integration, req.IntegrationData); err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
}
// deleteIntegration handles DELETE /v1/integrations/{name}/{project}.
func (e *Router) deleteIntegration(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    bodySize := 0
    integration, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    if err := e.services.Integrator.DeleteIntegration(project, integration); err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
        return
    }
    e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
}
// getIntegrationData handles GET .../data/{session}: it returns a URL
// pointing at the integration's data for one session.
func (e *Router) getIntegrationData(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    bodySize := 0
    integration, project, err := getIntegrationsArgs(r)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    session, err := getIntegrationSession(r)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    url, err := e.services.Integrator.GetSessionDataURL(project, integration, session)
    if err != nil {
        e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
        return
    }
    resp := map[string]string{"url": url}
    e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
// recordMetrics reports size, count and duration for one finished request.
func recordMetrics(requestStart time.Time, url string, code, bodySize int) {
    if bodySize > 0 {
        metrics.RecordRequestSize(float64(bodySize), url, code)
    }
    metrics.IncreaseTotalRequests()
    // time.Since is the idiomatic, monotonic-clock form of
    // time.Now().Sub(requestStart).
    metrics.RecordRequestDuration(float64(time.Since(requestStart).Milliseconds()), url, code)
}
// readBody drains the request body while enforcing the given byte limit
// via http.MaxBytesReader. A close failure is logged but not fatal.
func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
    limited := http.MaxBytesReader(w, r.Body, limit)
    data, readErr := io.ReadAll(limited)
    if err := limited.Close(); err != nil {
        e.log.Warn(r.Context(), "error while closing request body: %s", err)
    }
    if readErr != nil {
        return nil, readErr
    }
    return data, nil
}
// ResponseOK replies with an empty 200 body and records request metrics.
func (e *Router) ResponseOK(ctx context.Context, w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
    w.WriteHeader(http.StatusOK)
    e.log.Info(ctx, "response ok")
    recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// ResponseWithJSON marshals res and writes it as an application/json
// 200 response, recording request metrics.
func (e *Router) ResponseWithJSON(ctx context.Context, w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
    e.log.Info(ctx, "response ok")
    body, err := json.Marshal(res)
    if err != nil {
        // Previously the handler logged the failure but still wrote a nil
        // body with an implicit 200; report a proper 500 instead.
        e.log.Error(ctx, "can't marshal response: %s", err)
        w.WriteHeader(http.StatusInternalServerError)
        recordMetrics(requestStart, url, http.StatusInternalServerError, bodySize)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    w.Write(body)
    recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// response is the JSON envelope used for error replies.
type response struct {
    Error string `json:"error"`
}
// ResponseWithError writes a JSON {"error": ...} body with the given
// status code and records request metrics.
func (e *Router) ResponseWithError(ctx context.Context, w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
    e.log.Error(ctx, "response error, code: %d, error: %s", code, err)
    body, jsonErr := json.Marshal(&response{err.Error()})
    if jsonErr != nil {
        e.log.Error(ctx, "can't marshal response: %s", jsonErr)
    }
    // The error payload is JSON, so advertise it; headers must be set
    // before WriteHeader (ResponseWithJSON already did this).
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(code)
    w.Write(body)
    recordMetrics(requestStart, url, code, bodySize)
}

View file

@ -1,170 +0,0 @@
package data_integration
import (
"bytes"
"fmt"
"io"
"net/http"
"time"
"github.com/docker/distribution/context"
"github.com/gorilla/mux"
integration "openreplay/backend/internal/config/integrations"
"openreplay/backend/internal/http/util"
"openreplay/backend/pkg/logger"
limiter "openreplay/backend/pkg/spot/api"
"openreplay/backend/pkg/spot/auth"
)
// Router wires the integrations HTTP endpoints with auth, rate limiting
// and audit-style request logging.
type Router struct {
    log      logger.Logger
    cfg      *integration.Config
    router   *mux.Router
    services *ServiceBuilder
    limiter  *limiter.UserRateLimiter
}
// NewRouter validates its dependencies and returns a fully wired Router.
func NewRouter(cfg *integration.Config, log logger.Logger, services *ServiceBuilder) (*Router, error) {
    if cfg == nil {
        return nil, fmt.Errorf("config is empty")
    }
    if services == nil {
        return nil, fmt.Errorf("services is empty")
    }
    if log == nil {
        return nil, fmt.Errorf("logger is empty")
    }
    rtr := &Router{
        log:      log,
        cfg:      cfg,
        services: services,
        limiter:  limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
    }
    rtr.init()
    return rtr, nil
}
// init registers the routes and the middleware chain. Middleware order
// matters: CORS runs first, then auth, rate limiting and request logging.
func (e *Router) init() {
    e.router = mux.NewRouter()
    // Root route
    e.router.HandleFunc("/", e.ping)
    e.router.HandleFunc("/v1/integrations/{name}/{project}", e.createIntegration).Methods("POST", "OPTIONS")
    e.router.HandleFunc("/v1/integrations/{name}/{project}", e.getIntegration).Methods("GET", "OPTIONS")
    e.router.HandleFunc("/v1/integrations/{name}/{project}", e.updateIntegration).Methods("PATCH", "OPTIONS")
    e.router.HandleFunc("/v1/integrations/{name}/{project}", e.deleteIntegration).Methods("DELETE", "OPTIONS")
    e.router.HandleFunc("/v1/integrations/{name}/{project}/data/{session}", e.getIntegrationData).Methods("GET", "OPTIONS")
    // CORS middleware
    e.router.Use(e.corsMiddleware)
    e.router.Use(e.authMiddleware)
    e.router.Use(e.rateLimitMiddleware)
    e.router.Use(e.actionMiddleware)
}
// ping answers the root-path liveness probe with an empty 200.
func (e *Router) ping(w http.ResponseWriter, r *http.Request) {
    w.WriteHeader(http.StatusOK)
}
// corsMiddleware attaches CORS headers (when enabled) and answers
// preflight OPTIONS requests directly. Health-check requests ("/") skip
// CORS handling entirely.
func (e *Router) corsMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/" {
            next.ServeHTTP(w, r)
            // Missing in the original: without this return the function
            // fell through and invoked the chain a second time for "/".
            return
        }
        if e.cfg.UseAccessControlHeaders {
            // Prepare headers for preflight requests
            w.Header().Set("Access-Control-Allow-Origin", "*")
            w.Header().Set("Access-Control-Allow-Methods", "POST,GET,PATCH,DELETE")
            w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization,Content-Encoding")
        }
        if r.Method == http.MethodOptions {
            w.Header().Set("Cache-Control", "max-age=86400")
            w.WriteHeader(http.StatusOK)
            return
        }
        r = r.WithContext(context.WithValues(r.Context(), map[string]interface{}{"httpMethod": r.Method, "url": util.SafeString(r.URL.Path)}))
        next.ServeHTTP(w, r)
    })
}
// authMiddleware rejects requests whose Authorization header does not
// carry a valid JWT; the health endpoint ("/") bypasses auth.
func (e *Router) authMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/" {
            next.ServeHTTP(w, r)
            // Missing in the original: without this return, "/" requests
            // were auth-checked anyway and the chain could run twice.
            return
        }
        // Check if the request is authorized
        user, err := e.services.Auth.IsAuthorized(r.Header.Get("Authorization"), nil, false)
        if err != nil {
            e.log.Warn(r.Context(), "Unauthorized request: %s", err)
            w.WriteHeader(http.StatusUnauthorized)
            return
        }
        r = r.WithContext(context.WithValues(r.Context(), map[string]interface{}{"userData": user}))
        next.ServeHTTP(w, r)
    })
}
// rateLimitMiddleware enforces a per-user rate limit; "/" is exempt.
func (e *Router) rateLimitMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/" {
            next.ServeHTTP(w, r)
            // Missing in the original: without this return, the code fell
            // through to the userData type assertion, which panics for
            // unauthenticated "/" requests.
            return
        }
        user, ok := r.Context().Value("userData").(*auth.User)
        if !ok {
            // userData is set by authMiddleware; fail closed rather than
            // panic if the middleware order is ever broken.
            w.WriteHeader(http.StatusUnauthorized)
            return
        }
        rl := e.limiter.GetRateLimiter(user.ID)
        if !rl.Allow() {
            http.Error(w, "Too Many Requests", http.StatusTooManyRequests)
            return
        }
        next.ServeHTTP(w, r)
    })
}
type statusWriter struct {
http.ResponseWriter
statusCode int
}
func (w *statusWriter) WriteHeader(statusCode int) {
w.statusCode = statusCode
w.ResponseWriter.WriteHeader(statusCode)
}
func (w *statusWriter) Write(b []byte) (int, error) {
if w.statusCode == 0 {
w.statusCode = http.StatusOK
}
return w.ResponseWriter.Write(b)
}
// actionMiddleware captures the request body and final status code so
// every non-health request can be logged after it is served.
func (e *Router) actionMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path == "/" {
            next.ServeHTTP(w, r)
            // Missing in the original: without this return, "/" requests
            // were served again below and logged.
            return
        }
        // Read body and restore the io.ReadCloser to its original state
        bodyBytes, err := io.ReadAll(r.Body)
        if err != nil {
            http.Error(w, "can't read body", http.StatusBadRequest)
            return
        }
        r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
        // Use custom response writer to get the status code
        sw := &statusWriter{ResponseWriter: w}
        // Serve the request
        next.ServeHTTP(sw, r)
        e.logRequest(r, bodyBytes, sw.statusCode)
    })
}
// logRequest logs method, path, raw body and final status of a request.
// NOTE(review): the raw body is logged verbatim — it may contain
// credentials or PII; confirm this is acceptable for this service.
func (e *Router) logRequest(r *http.Request, bodyBytes []byte, statusCode int) {
    e.log.Info(r.Context(), "Request: %s %s %s %d", r.Method, r.URL.Path, bodyBytes, statusCode)
}

// GetHandler exposes the configured mux router as an http.Handler.
func (e *Router) GetHandler() http.Handler {
    return e.router
}

View file

@ -1,4 +1,4 @@
package data_integration
package service
import (
"bytes"

View file

@ -1,55 +0,0 @@
package http
import (
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
"strconv"
)
// httpRequestSize tracks the body size of each handled HTTP request,
// labeled by URL and response code.
var httpRequestSize = prometheus.NewHistogramVec(
    prometheus.HistogramOpts{
        Namespace: "http",
        Name:      "request_size_bytes",
        Help:      "A histogram displaying the size of each HTTP request in bytes.",
        Buckets:   common.DefaultSizeBuckets,
    },
    []string{"url", "response_code"},
)

// RecordRequestSize observes one request's body size in bytes.
func RecordRequestSize(size float64, url string, code int) {
    httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
}

// httpRequestDuration tracks per-request latency, labeled like size.
var httpRequestDuration = prometheus.NewHistogramVec(
    prometheus.HistogramOpts{
        Namespace: "http",
        Name:      "request_duration_seconds",
        Help:      "A histogram displaying the duration of each HTTP request in seconds.",
        Buckets:   common.DefaultDurationBuckets,
    },
    []string{"url", "response_code"},
)

// RecordRequestDuration converts the millisecond input to seconds
// before observing, matching the metric's unit.
func RecordRequestDuration(durMillis float64, url string, code int) {
    httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
}

// httpTotalRequests counts every handled request.
var httpTotalRequests = prometheus.NewCounter(
    prometheus.CounterOpts{
        Namespace: "http",
        Name:      "requests_total",
        Help:      "A counter displaying the number all HTTP requests.",
    },
)

// IncreaseTotalRequests increments the total request counter by one.
func IncreaseTotalRequests() {
    httpTotalRequests.Inc()
}

// List returns all collectors of this package for registration.
func List() []prometheus.Collector {
    return []prometheus.Collector{
        httpRequestSize,
        httpRequestDuration,
        httpTotalRequests,
    }
}

View file

@ -1,53 +1,11 @@
package spot
import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
var spotRequestSize = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "spot",
Name: "request_size_bytes",
Help: "A histogram displaying the size of each HTTP request in bytes.",
Buckets: common.DefaultSizeBuckets,
},
[]string{"url", "response_code"},
)
func RecordRequestSize(size float64, url string, code int) {
spotRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
}
var spotRequestDuration = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "spot",
Name: "request_duration_seconds",
Help: "A histogram displaying the duration of each HTTP request in seconds.",
Buckets: common.DefaultDurationBuckets,
},
[]string{"url", "response_code"},
)
func RecordRequestDuration(durMillis float64, url string, code int) {
spotRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
}
var spotTotalRequests = prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: "spot",
Name: "requests_total",
Help: "A counter displaying the number all HTTP requests.",
},
)
func IncreaseTotalRequests() {
spotTotalRequests.Inc()
}
var spotOriginalVideoSize = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: "spot",
@ -177,9 +135,6 @@ func RecordTranscodedVideoUploadDuration(durMillis float64) {
func List() []prometheus.Collector {
return []prometheus.Collector{
spotRequestSize,
spotRequestDuration,
spotTotalRequests,
spotOriginalVideoSize,
spotCroppedVideoSize,
spotVideosTotal,

View file

@ -0,0 +1,84 @@
package web
import (
"strconv"
"github.com/prometheus/client_golang/prometheus"
"openreplay/backend/pkg/metrics/common"
)
// Web abstracts the per-service HTTP request metrics (size, duration,
// total count) so every service exposes an identically shaped set under
// its own Prometheus namespace.
type Web interface {
    RecordRequestSize(size float64, url string, code int)
    RecordRequestDuration(durMillis float64, url string, code int)
    IncreaseTotalRequests()
    List() []prometheus.Collector
}

// webImpl is the Prometheus-backed implementation of Web.
type webImpl struct {
    httpRequestSize     *prometheus.HistogramVec
    httpRequestDuration *prometheus.HistogramVec
    httpTotalRequests   prometheus.Counter
}
// New builds the Web metric set, namespacing every collector with
// serviceName.
func New(serviceName string) Web {
    m := &webImpl{}
    m.httpRequestSize = newRequestSizeMetric(serviceName)
    m.httpRequestDuration = newRequestDurationMetric(serviceName)
    m.httpTotalRequests = newTotalRequestsMetric(serviceName)
    return m
}

// List exposes the collectors for registration with a Prometheus registry.
func (w *webImpl) List() []prometheus.Collector {
    collectors := make([]prometheus.Collector, 0, 3)
    collectors = append(collectors, w.httpRequestSize, w.httpRequestDuration, w.httpTotalRequests)
    return collectors
}
// newRequestSizeMetric creates the request-size histogram for serviceName.
func newRequestSizeMetric(serviceName string) *prometheus.HistogramVec {
    opts := prometheus.HistogramOpts{
        Namespace: serviceName,
        Name:      "request_size_bytes",
        Help:      "A histogram displaying the size of each HTTP request in bytes.",
        Buckets:   common.DefaultSizeBuckets,
    }
    return prometheus.NewHistogramVec(opts, []string{"url", "response_code"})
}

// RecordRequestSize observes one request's body size, labeled by URL
// and response code.
func (w *webImpl) RecordRequestSize(size float64, url string, code int) {
    codeLabel := strconv.Itoa(code)
    w.httpRequestSize.WithLabelValues(url, codeLabel).Observe(size)
}
// newRequestDurationMetric creates the request-latency histogram for
// serviceName (unit: seconds).
func newRequestDurationMetric(serviceName string) *prometheus.HistogramVec {
    opts := prometheus.HistogramOpts{
        Namespace: serviceName,
        Name:      "request_duration_seconds",
        Help:      "A histogram displaying the duration of each HTTP request in seconds.",
        Buckets:   common.DefaultDurationBuckets,
    }
    return prometheus.NewHistogramVec(opts, []string{"url", "response_code"})
}

// RecordRequestDuration converts the millisecond input to seconds to
// match the metric's unit before observing.
func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {
    codeLabel := strconv.Itoa(code)
    w.httpRequestDuration.WithLabelValues(url, codeLabel).Observe(durMillis / 1000.0)
}
// newTotalRequestsMetric creates the total-request counter for serviceName.
func newTotalRequestsMetric(serviceName string) prometheus.Counter {
    opts := prometheus.CounterOpts{
        Namespace: serviceName,
        Name:      "requests_total",
        Help:      "A counter displaying the number all HTTP requests.",
    }
    return prometheus.NewCounter(opts)
}

// IncreaseTotalRequests increments the total-request counter by one.
func (w *webImpl) IncreaseTotalRequests() {
    w.httpTotalRequests.Inc()
}

View file

@ -0,0 +1,59 @@
package api
import (
"fmt"
"io"
"net/http"
"github.com/klauspost/compress/gzip"
"openreplay/backend/pkg/logger"
)
// ReadBody drains the request body while enforcing the given byte limit
// via http.MaxBytesReader. A close failure is logged but not fatal.
func ReadBody(log logger.Logger, w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
    limited := http.MaxBytesReader(w, r.Body, limit)
    data, readErr := io.ReadAll(limited)
    if err := limited.Close(); err != nil {
        log.Warn(r.Context(), "error while closing request body: %s", err)
    }
    if readErr != nil {
        return nil, readErr
    }
    return data, nil
}
// ReadCompressedBody drains the request body while enforcing limit and
// transparently gunzips it when Content-Encoding: gzip is set.
func ReadCompressedBody(log logger.Logger, w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
    body := http.MaxBytesReader(w, r.Body, limit)
    // Close the limited reader on every path; a close failure is logged
    // but does not override the result.
    defer func() {
        if err := body.Close(); err != nil {
            log.Warn(r.Context(), "error while closing request body: %s", err)
        }
    }()
    // Plain body: read it directly.
    if r.Header.Get("Content-Encoding") != "gzip" {
        return io.ReadAll(body)
    }
    // Gzipped body: the original declared an outer err and then shadowed
    // it inside this branch (`reader, err := ...`), which only worked
    // because every inner error path returned. The early-return shape
    // above removes the shadowing entirely.
    reader, err := gzip.NewReader(body)
    if err != nil {
        return nil, fmt.Errorf("can't create gzip reader: %s", err)
    }
    bodyBytes, err := io.ReadAll(reader)
    if err != nil {
        return nil, fmt.Errorf("can't read gzip body: %s", err)
    }
    if err := reader.Close(); err != nil {
        log.Warn(r.Context(), "can't close gzip reader: %s", err)
    }
    return bodyBytes, nil
}

View file

@ -0,0 +1,13 @@
package api
import "net/http"
// Description declares one HTTP route: its path template, handler
// function and HTTP method.
type Description struct {
    Path    string
    Handler http.HandlerFunc
    Method  string
}

// Handlers is implemented by every service-specific handler set and
// enumerates the routes it wants registered.
type Handlers interface {
    GetAll() []*Description
}

View file

@ -0,0 +1,41 @@
package api
import (
"net/http"
ctxStore "github.com/docker/distribution/context"
"openreplay/backend/internal/http/util"
)
// health answers the root-path liveness probe with an empty 200.
func (e *routerImpl) health(w http.ResponseWriter, r *http.Request) {
    w.WriteHeader(http.StatusOK)
}

// healthMiddleware short-circuits "/" requests so liveness probes never
// pass through the rest of the middleware chain.
func (e *routerImpl) healthMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if r.URL.Path != "/" {
            next.ServeHTTP(w, r)
            return
        }
        w.WriteHeader(http.StatusOK)
    })
}
// corsMiddleware optionally attaches CORS headers, answers preflight
// OPTIONS requests directly, and stores the method/URL pair in the
// request context for downstream logging.
func (e *routerImpl) corsMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if e.cfg.UseAccessControlHeaders {
            headers := w.Header()
            headers.Set("Access-Control-Allow-Origin", "*")
            headers.Set("Access-Control-Allow-Methods", "POST,GET,PATCH,DELETE")
            headers.Set("Access-Control-Allow-Headers", "Content-Type,Authorization,Content-Encoding")
        }
        if r.Method == http.MethodOptions {
            // Preflight: answer directly and let clients cache the verdict.
            w.Header().Set("Cache-Control", "max-age=86400")
            w.WriteHeader(http.StatusOK)
            return
        }
        ctxValues := map[string]interface{}{"httpMethod": r.Method, "url": util.SafeString(r.URL.Path)}
        r = r.WithContext(ctxStore.WithValues(r.Context(), ctxValues))
        next.ServeHTTP(w, r)
    })
}

View file

@ -0,0 +1,61 @@
package api
import (
"context"
"encoding/json"
"net/http"
"time"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/web"
)
// Responser centralizes HTTP reply writing plus request-metric recording.
type Responser struct {
    metrics web.Web
}

// NewResponser builds a Responser reporting into the given metric set.
func NewResponser(webMetrics web.Web) *Responser {
    return &Responser{
        metrics: webMetrics,
    }
}

// response is the JSON envelope used for error replies.
type response struct {
    Error string `json:"error"`
}
// ResponseOK replies with an empty 200 body and records request metrics.
func (r *Responser) ResponseOK(log logger.Logger, ctx context.Context, w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
    w.WriteHeader(http.StatusOK)
    log.Info(ctx, "response ok")
    r.recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// ResponseWithJSON marshals res and writes it as an application/json
// 200 response, recording request metrics.
func (r *Responser) ResponseWithJSON(log logger.Logger, ctx context.Context, w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
    log.Info(ctx, "response ok")
    body, err := json.Marshal(res)
    if err != nil {
        // Previously a marshal failure was logged but a nil body was still
        // written with an implicit 200; report a proper 500 instead.
        log.Error(ctx, "can't marshal response: %s", err)
        w.WriteHeader(http.StatusInternalServerError)
        r.recordMetrics(requestStart, url, http.StatusInternalServerError, bodySize)
        return
    }
    w.Header().Set("Content-Type", "application/json")
    w.Write(body)
    r.recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// ResponseWithError writes a JSON {"error": ...} body with the given
// status code and records request metrics.
func (r *Responser) ResponseWithError(log logger.Logger, ctx context.Context, w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
    log.Error(ctx, "response error, code: %d, error: %s", code, err)
    body, jsonErr := json.Marshal(&response{err.Error()})
    if jsonErr != nil {
        log.Error(ctx, "can't marshal response: %s", jsonErr)
    }
    // The error payload is JSON, so advertise it; headers must be set
    // before WriteHeader (the success path already sets Content-Type).
    w.Header().Set("Content-Type", "application/json")
    w.WriteHeader(code)
    w.Write(body)
    r.recordMetrics(requestStart, url, code, bodySize)
}
// recordMetrics reports size, count and duration for one finished request.
func (r *Responser) recordMetrics(requestStart time.Time, url string, code, bodySize int) {
    if bodySize > 0 {
        r.metrics.RecordRequestSize(float64(bodySize), url, code)
    }
    r.metrics.IncreaseTotalRequests()
    // time.Since is the idiomatic, monotonic-clock form of
    // time.Now().Sub(requestStart).
    r.metrics.RecordRequestDuration(float64(time.Since(requestStart).Milliseconds()), url, code)
}

View file

@ -0,0 +1,69 @@
package api
import (
"fmt"
"net/http"
"github.com/gorilla/mux"
"openreplay/backend/internal/config/common"
"openreplay/backend/pkg/logger"
)
// Router assembles HTTP handler sets and middlewares on top of a
// gorilla/mux router.
type Router interface {
    AddHandlers(prefix string, handlers ...Handlers)
    AddMiddlewares(middlewares ...func(http.Handler) http.Handler)
    Get() http.Handler
}

// routerImpl is the mux-backed Router implementation.
type routerImpl struct {
    log    logger.Logger
    cfg    *common.HTTP
    router *mux.Router
}
// NewRouter validates its inputs and returns a Router with the default
// health and CORS middlewares installed.
func NewRouter(cfg *common.HTTP, log logger.Logger) (Router, error) {
    if cfg == nil {
        return nil, fmt.Errorf("config is empty")
    }
    if log == nil {
        return nil, fmt.Errorf("logger is empty")
    }
    impl := &routerImpl{
        log:    log,
        cfg:    cfg,
        router: mux.NewRouter(),
    }
    impl.initRouter()
    return impl, nil
}
// initRouter registers the health endpoint and the always-on middlewares.
func (e *routerImpl) initRouter() {
    e.router.HandleFunc("/", e.health)
    // Default middlewares
    e.router.Use(e.healthMiddleware)
    e.router.Use(e.corsMiddleware)
}

// NoPrefix mounts a handler set at its own paths only (no extra mount).
const NoPrefix = ""
// AddHandlers registers every route of every handler set. When prefix is
// not NoPrefix, each route is additionally mounted under the prefix.
// OPTIONS is always allowed so CORS preflights reach corsMiddleware.
func (e *routerImpl) AddHandlers(prefix string, handlers ...Handlers) {
    for _, set := range handlers {
        for _, desc := range set.GetAll() {
            e.router.HandleFunc(desc.Path, desc.Handler).Methods(desc.Method, "OPTIONS")
            if prefix == NoPrefix {
                continue
            }
            e.router.HandleFunc(prefix+desc.Path, desc.Handler).Methods(desc.Method, "OPTIONS")
        }
    }
}
// AddMiddlewares appends extra middlewares after the default ones.
func (e *routerImpl) AddMiddlewares(middlewares ...func(http.Handler) http.Handler) {
    for i := range middlewares {
        e.router.Use(middlewares[i])
    }
}

// Get returns the fully assembled http.Handler.
func (e *routerImpl) Get() http.Handler {
    return e.router
}

View file

@ -2,16 +2,20 @@ package auth
import (
"fmt"
"net/http"
"strings"
"github.com/golang-jwt/jwt/v5"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/keys"
"openreplay/backend/pkg/server/user"
)
type Auth interface {
IsAuthorized(authHeader string, permissions []string, isExtension bool) (*User, error)
IsAuthorized(authHeader string, permissions []string, isExtension bool) (*user.User, error)
Middleware(next http.Handler) http.Handler
}
type authImpl struct {
@ -19,18 +23,20 @@ type authImpl struct {
secret string
spotSecret string
pgconn pool.Pool
keys keys.Keys
}
func NewAuth(log logger.Logger, jwtSecret, jwtSpotSecret string, conn pool.Pool) Auth {
func NewAuth(log logger.Logger, jwtSecret, jwtSpotSecret string, conn pool.Pool, keys keys.Keys) Auth {
return &authImpl{
log: log,
secret: jwtSecret,
spotSecret: jwtSpotSecret,
pgconn: conn,
keys: keys,
}
}
func parseJWT(authHeader, secret string) (*JWTClaims, error) {
func parseJWT(authHeader, secret string) (*user.JWTClaims, error) {
if authHeader == "" {
return nil, fmt.Errorf("authorization header missing")
}
@ -40,7 +46,7 @@ func parseJWT(authHeader, secret string) (*JWTClaims, error) {
}
tokenString := tokenParts[1]
claims := &JWTClaims{}
claims := &user.JWTClaims{}
token, err := jwt.ParseWithClaims(tokenString, claims,
func(token *jwt.Token) (interface{}, error) {
return []byte(secret), nil

View file

@ -1,6 +1,8 @@
package auth
func (a *authImpl) IsAuthorized(authHeader string, permissions []string, isExtension bool) (*User, error) {
import "openreplay/backend/pkg/server/user"
func (a *authImpl) IsAuthorized(authHeader string, permissions []string, isExtension bool) (*user.User, error) {
secret := a.secret
if isExtension {
secret = a.spotSecret

View file

@ -0,0 +1,65 @@
package auth
import (
"net/http"
"github.com/gorilla/mux"
ctxStore "github.com/docker/distribution/context"
)
// Middleware authenticates every request before it reaches the wrapped
// handler. The primary credential is a JWT from the Authorization header;
// selected spot endpoints may instead present a public key in the "key"
// query parameter. On success the resolved user is stored in the request
// context under "userData" for downstream handlers.
func (e *authImpl) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		authUser, err := e.IsAuthorized(r.Header.Get("Authorization"), getPermissions(r.URL.Path), e.isExtensionRequest(r))
		if err != nil {
			// JWT failed: fall back to public-key auth, but only on the
			// endpoints that explicitly allow it.
			if !e.isSpotWithKeyRequest(r) {
				e.log.Warn(r.Context(), "Unauthorized request, wrong jwt token: %s", err)
				w.WriteHeader(http.StatusUnauthorized)
				return
			}
			authUser, err = e.keys.IsValid(r.URL.Query().Get("key"))
			if err != nil {
				e.log.Warn(r.Context(), "Unauthorized request, wrong public key: %s", err)
				w.WriteHeader(http.StatusUnauthorized)
				return
			}
		}
		// Expose the authenticated user to downstream handlers.
		r = r.WithContext(ctxStore.WithValues(r.Context(), map[string]interface{}{"userData": authUser}))
		next.ServeHTTP(w, r)
	})
}
// isExtensionRequest reports whether the request targets one of the routes
// used by the browser extension (ping, spot creation, upload confirmation),
// which are validated against the spot JWT secret instead of the regular one.
func (e *authImpl) isExtensionRequest(r *http.Request) bool {
	pathTemplate, err := mux.CurrentRoute(r).GetPathTemplate()
	if err != nil {
		e.log.Error(r.Context(), "failed to get path template: %s", err)
		return false
	}
	switch {
	case pathTemplate == "/v1/ping":
		return true
	case pathTemplate == "/v1/spots" && r.Method == "POST":
		return true
	case pathTemplate == "/v1/spots/{id}/uploaded" && r.Method == "POST":
		return true
	default:
		return false
	}
}
// isSpotWithKeyRequest reports whether the request may authenticate with a
// public key instead of a JWT: reading a spot, reading its status, or adding
// a comment. Always false when key support is not configured.
func (e *authImpl) isSpotWithKeyRequest(r *http.Request) bool {
	if e.keys == nil {
		return false
	}
	pathTemplate, err := mux.CurrentRoute(r).GetPathTemplate()
	if err != nil {
		return false
	}
	switch r.Method {
	case "GET":
		// Read-only spot endpoints.
		return pathTemplate == "/v1/spots/{id}" || pathTemplate == "/v1/spots/{id}/status"
	case "POST":
		return pathTemplate == "/v1/spots/{id}/comment"
	}
	return false
}

View file

@ -1,4 +1,4 @@
package api
package auth
func getPermissions(urlPath string) []string {
return nil

View file

@ -3,10 +3,11 @@ package auth
import (
"fmt"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/server/user"
"strings"
)
func authUser(conn pool.Pool, userID, tenantID, jwtIAT int, isExtension bool) (*User, error) {
func authUser(conn pool.Pool, userID, tenantID, jwtIAT int, isExtension bool) (*user.User, error) {
sql := `
SELECT user_id, name, email, EXTRACT(epoch FROM spot_jwt_iat)::BIGINT AS spot_jwt_iat
FROM public.users
@ -15,12 +16,19 @@ func authUser(conn pool.Pool, userID, tenantID, jwtIAT int, isExtension bool) (*
if !isExtension {
sql = strings.ReplaceAll(sql, "spot_jwt_iat", "jwt_iat")
}
user := &User{TenantID: 1, AuthMethod: "jwt"}
if err := conn.QueryRow(sql, userID).Scan(&user.ID, &user.Name, &user.Email, &user.JwtIat); err != nil {
newUser := &user.User{TenantID: 1, AuthMethod: "jwt"}
if err := conn.QueryRow(sql, userID).Scan(&newUser.ID, &newUser.Name, &newUser.Email, &newUser.JwtIat); err != nil {
return nil, fmt.Errorf("user not found")
}
if user.JwtIat == 0 || abs(jwtIAT-user.JwtIat) > 1 {
if newUser.JwtIat == 0 || abs(jwtIAT-newUser.JwtIat) > 1 {
return nil, fmt.Errorf("token has been updated")
}
return user, nil
return newUser, nil
}
// abs returns the absolute value of x.
func abs(x int) int {
	if x >= 0 {
		return x
	}
	return -x
}

View file

@ -1,15 +1,15 @@
package service
package keys
import (
"context"
"fmt"
"openreplay/backend/pkg/server/user"
"time"
"github.com/rs/xid"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/spot/auth"
)
type Key struct {
@ -22,9 +22,9 @@ type Key struct {
}
type Keys interface {
Set(spotID, expiration uint64, user *auth.User) (*Key, error)
Get(spotID uint64, user *auth.User) (*Key, error)
IsValid(key string) (*auth.User, error)
Set(spotID, expiration uint64, user *user.User) (*Key, error)
Get(spotID uint64, user *user.User) (*Key, error)
IsValid(key string) (*user.User, error)
}
type keysImpl struct {
@ -32,7 +32,7 @@ type keysImpl struct {
conn pool.Pool
}
func (k *keysImpl) Set(spotID, expiration uint64, user *auth.User) (*Key, error) {
func (k *keysImpl) Set(spotID, expiration uint64, user *user.User) (*Key, error) {
switch {
case spotID == 0:
return nil, fmt.Errorf("spotID is required")
@ -89,7 +89,7 @@ func (k *keysImpl) Set(spotID, expiration uint64, user *auth.User) (*Key, error)
return key, nil
}
func (k *keysImpl) Get(spotID uint64, user *auth.User) (*Key, error) {
func (k *keysImpl) Get(spotID uint64, user *user.User) (*Key, error) {
switch {
case spotID == 0:
return nil, fmt.Errorf("spotID is required")
@ -114,7 +114,7 @@ func (k *keysImpl) Get(spotID uint64, user *auth.User) (*Key, error) {
return key, nil
}
func (k *keysImpl) IsValid(key string) (*auth.User, error) {
func (k *keysImpl) IsValid(key string) (*user.User, error) {
if key == "" {
return nil, fmt.Errorf("key is required")
}
@ -133,7 +133,7 @@ func (k *keysImpl) IsValid(key string) (*auth.User, error) {
return nil, fmt.Errorf("key is expired")
}
// Get user info by userID
user := &auth.User{ID: userID, AuthMethod: "public-key"}
user := &user.User{ID: userID, AuthMethod: "public-key"}
// We don't need tenantID here
if err := k.conn.QueryRow(getUserSQL, userID).Scan(&user.TenantID, &user.Name, &user.Email); err != nil {
k.log.Error(context.Background(), "failed to get user: %v", err)

View file

@ -1,3 +1,3 @@
package service
package keys
// getUserSQL looks up a non-deleted user by ID. The constant 1 in the select
// list stands in for the tenant ID (presumably a single-tenant build — TODO confirm).
var getUserSQL = `SELECT 1, name, email FROM public.users WHERE user_id = $1 AND deleted_at IS NULL LIMIT 1`

View file

@ -1,4 +1,4 @@
package api
package limiter
import (
"sync"

View file

@ -0,0 +1,24 @@
package limiter
import (
"net/http"
"openreplay/backend/pkg/server/user"
)
// Middleware enforces a per-user rate limit. Requests without an
// authenticated user in the context are passed through untouched;
// over-limit requests are rejected with 429.
func (rl *UserRateLimiter) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		userContext := r.Context().Value("userData")
		if userContext == nil {
			next.ServeHTTP(w, r)
			return
		}
		// Comma-ok assertion: the original bare assertion panicked if the
		// context value was not a *user.User. Treat an unexpected type the
		// same as an unauthenticated request.
		authUser, ok := userContext.(*user.User)
		if !ok {
			next.ServeHTTP(w, r)
			return
		}
		// Renamed from "rl" — the original shadowed the receiver here.
		limiter := rl.GetRateLimiter(authUser.ID)
		if !limiter.Allow() {
			http.Error(w, "Too Many Requests", http.StatusTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}

View file

@ -0,0 +1,75 @@
package server
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"time"
"golang.org/x/net/http2"
"openreplay/backend/internal/config/common"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
)
// Server wraps http.Server with graceful start/stop helpers.
type Server struct {
	server *http.Server
}
// New builds an HTTP/2-capable server bound to host:port, serving handler
// with the given read/write timeout. It validates its arguments and returns
// an error rather than producing an unusable server.
func New(handler http.Handler, host, port string, timeout time.Duration) (*Server, error) {
	switch {
	case port == "":
		return nil, errors.New("empty server port")
	case handler == nil:
		return nil, errors.New("empty handler")
	case timeout < 1:
		return nil, fmt.Errorf("invalid timeout %d", timeout)
	}
	server := &http.Server{
		Addr:         fmt.Sprintf("%s:%s", host, port),
		Handler:      handler,
		ReadTimeout:  timeout,
		WriteTimeout: timeout,
	}
	// Enable HTTP/2 on top of the standard server.
	// %w (instead of %s) so callers can inspect the cause with errors.Is/As.
	if err := http2.ConfigureServer(server, nil); err != nil {
		return nil, fmt.Errorf("error configuring server: %w", err)
	}
	return &Server{
		server: server,
	}, nil
}
// Start begins serving and blocks until the server stops; it returns the
// error from ListenAndServe (http.ErrServerClosed after a graceful shutdown).
func (s *Server) Start() error {
	return s.server.ListenAndServe()
}
// Stop shuts the server down gracefully, waiting for in-flight requests to
// finish. A shutdown failure is reported to stdout rather than returned.
func (s *Server) Stop() {
	err := s.server.Shutdown(context.Background())
	if err != nil {
		fmt.Printf("error shutting down server: %s\n", err)
	}
}
// Run creates the web server for the given router, serves it in a background
// goroutine, and blocks until SIGINT/SIGTERM, then shuts down gracefully.
func Run(ctx context.Context, log logger.Logger, cfg *common.HTTP, router api.Router) {
	webServer, err := New(router.Get(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
	if err != nil {
		log.Fatal(ctx, "failed while creating server: %s", err)
	}
	go func() {
		// BUG FIX: ListenAndServe returns http.ErrServerClosed after a
		// graceful Stop(); treating it as fatal killed the process during
		// normal shutdown. Only unexpected errors are fatal now.
		if err := webServer.Start(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Fatal(ctx, "http server error: %s", err)
		}
	}()
	log.Info(ctx, "server successfully started on port %s", cfg.HTTPPort)

	// Wait stop signal to shut down server gracefully
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
	<-sigchan
	log.Info(ctx, "shutting down the server")
	webServer.Stop()
}

View file

@ -0,0 +1,11 @@
package tracer
import (
"net/http"
)
// Middleware is a pass-through placeholder: it performs no tracing and
// simply forwards the request to the next handler.
func (t *tracerImpl) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r)
	})
}

View file

@ -0,0 +1,23 @@
package tracer
import (
"net/http"
db "openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
)
// Tracer records request-level trace/audit information as HTTP middleware.
type Tracer interface {
	Middleware(next http.Handler) http.Handler
	Close() error
}

// tracerImpl is a no-op Tracer implementation.
type tracerImpl struct{}

// NewTracer returns a tracer that currently performs no work; log and conn
// are accepted for signature compatibility but unused here (presumably wired
// up in another edition — TODO confirm).
func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
	return &tracerImpl{}, nil
}

// Close releases tracer resources; nothing to release for the no-op tracer.
func (t *tracerImpl) Close() error {
	return nil
}

View file

@ -1,4 +1,4 @@
package auth
package user
import "github.com/golang-jwt/jwt/v5"
@ -25,10 +25,3 @@ func (u *User) HasPermission(perm string) bool {
_, ok := u.Permissions[perm]
return ok
}
func abs(x int) int {
if x < 0 {
return -x
}
return x
}

View file

@ -0,0 +1,63 @@
package api
import (
"sync"
"time"
)
// BeaconSize holds a per-session beacon size limit together with the time it
// was last touched, so stale entries can be evicted by the cleaner.
type BeaconSize struct {
	size int64
	time time.Time
}

// BeaconCache stores per-session beacon size limits and falls back to a
// default limit for unknown sessions. A background goroutine evicts entries
// idle for more than three minutes; it runs for the process lifetime.
type BeaconCache struct {
	mutex           *sync.RWMutex
	beaconSizeCache map[uint64]*BeaconSize
	defaultLimit    int64
}

// NewBeaconCache creates a cache with the given default limit and starts the
// background cleaner goroutine.
func NewBeaconCache(limit int64) *BeaconCache {
	cache := &BeaconCache{
		mutex:           &sync.RWMutex{},
		beaconSizeCache: make(map[uint64]*BeaconSize),
		defaultLimit:    limit,
	}
	go cache.cleaner()
	return cache
}

// Add records the beacon size limit for a session; non-positive sizes are
// ignored so the default limit keeps applying.
func (e *BeaconCache) Add(sessionID uint64, size int64) {
	if size <= 0 {
		return
	}
	e.mutex.Lock()
	defer e.mutex.Unlock()
	e.beaconSizeCache[sessionID] = &BeaconSize{
		size: size,
		time: time.Now(),
	}
}

// Get returns the session's limit (refreshing its last-used time) or the
// default when the session is unknown.
// BUG FIX: the original took only the read lock here while mutating
// beaconSize.time, which is a data race under concurrent Gets; the write
// lock is required because Get updates the entry.
func (e *BeaconCache) Get(sessionID uint64) int64 {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	if beaconSize, ok := e.beaconSizeCache[sessionID]; ok {
		beaconSize.time = time.Now()
		return beaconSize.size
	}
	return e.defaultLimit
}

// cleaner periodically drops entries not touched for over three minutes.
func (e *BeaconCache) cleaner() {
	for {
		time.Sleep(time.Minute * 2)
		now := time.Now()
		e.mutex.Lock()
		for sid, bs := range e.beaconSizeCache {
			if now.Sub(bs.time) > time.Minute*3 {
				delete(e.beaconSizeCache, sid)
			}
		}
		e.mutex.Unlock()
	}
}

View file

@ -0,0 +1,379 @@
package mobile
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
"github.com/Masterminds/semver"
gzip "github.com/klauspost/pgzip"
httpCfg "openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/internal/http/ios"
"openreplay/backend/internal/http/uaparser"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/conditions"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/token"
)
// checkMobileTrackerVersion reports whether the mobile tracker version is at
// least 1.0.9. Pre-release suffixes (e.g. "1.1.0-beta") are stripped before
// the comparison so betas of a supported version are accepted.
func checkMobileTrackerVersion(ver string) bool {
	constraint, err := semver.NewConstraint(">=1.0.9")
	if err != nil {
		return false
	}
	// Drop the pre-release part, if any.
	if idx := strings.Index(ver, "-"); idx != -1 {
		ver = ver[:idx]
	}
	parsed, err := semver.NewVersion(ver)
	if err != nil {
		return false
	}
	return constraint.Check(parsed)
}
// handlersImpl bundles the dependencies of the mobile ingestion endpoints.
type handlersImpl struct {
	log        logger.Logger
	cfg        *httpCfg.Config
	responser  *api.Responser
	producer   types.Producer // raw message/image queue
	projects   projects.Projects
	sessions   sessions.Sessions
	uaParser   *uaparser.UAParser
	geoIP      geoip.GeoParser
	tokenizer  *token.Tokenizer
	conditions conditions.Conditions
	flaker     *flakeid.Flaker // session ID generator
	features   map[string]bool // feature toggles reported to the tracker
}
// NewHandlers wires the mobile ingestion endpoints with their dependencies:
// the queue producer, project/session stores, UA and geo parsers, tokenizer,
// capture conditions and the session-ID generator.
func NewHandlers(cfg *httpCfg.Config, log logger.Logger, responser *api.Responser, producer types.Producer, projects projects.Projects,
	sessions sessions.Sessions, uaParser *uaparser.UAParser, geoIP geoip.GeoParser, tokenizer *token.Tokenizer,
	conditions conditions.Conditions, flaker *flakeid.Flaker) (api.Handlers, error) {
	return &handlersImpl{
		log:        log,
		cfg:        cfg,
		responser:  responser,
		producer:   producer,
		projects:   projects,
		sessions:   sessions,
		uaParser:   uaParser,
		geoIP:      geoIP,
		tokenizer:  tokenizer,
		conditions: conditions,
		flaker:     flaker,
		// Feature toggles reported back to the tracker on session start.
		features: map[string]bool{
			"feature-flags":  cfg.IsFeatureFlagEnabled,
			"usability-test": cfg.IsUsabilityTestEnabled,
		},
	}, nil
}
// GetAll lists the mobile ingestion routes served by this handler set.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/mobile/start", e.startMobileSessionHandler, "POST"},    // session handshake
		{"/v1/mobile/i", e.pushMobileMessagesHandler, "POST"},        // live message batches
		{"/v1/mobile/late", e.pushMobileLateMessagesHandler, "POST"}, // batches sent after token expiry
		{"/v1/mobile/images", e.mobileImagesUploadHandler, "POST"},   // screenshot uploads
	}
}
// startMobileSessionHandler handles POST /v1/mobile/start: it validates the
// project and tracker version, applies the capture (sampling) rate, creates
// a new session unless the tracker presented a still-valid token, optionally
// persists the session start and publishes it to the queue, and replies with
// the session token plus recording settings.
func (e *handlersImpl) startMobileSessionHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	if r.Body == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
		return
	}
	// Cap the request body to guard against oversized JSON payloads.
	body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
	defer body.Close()

	req := &StartMobileSessionRequest{}
	if err := json.NewDecoder(body).Decode(req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, 0)
		return
	}

	// Add tracker version to context
	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))

	if req.ProjectKey == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, 0)
		return
	}

	p, err := e.projects.GetProjectByKey(*req.ProjectKey)
	if err != nil {
		if postgres.IsNoRowsErr(err) {
			logErr := fmt.Errorf("project doesn't exist or is not active, key: %s", *req.ProjectKey)
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, logErr, startTime, r.URL.Path, 0)
		} else {
			e.log.Error(r.Context(), "failed to get project by key: %s, err: %s", *req.ProjectKey, err)
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, 0)
		}
		return
	}

	// Add projectID to context
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))

	// Check if the project supports mobile sessions
	if !p.IsMobile() {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("project doesn't support mobile sessions"), startTime, r.URL.Path, 0)
		return
	}
	if !checkMobileTrackerVersion(req.TrackerVersion) {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUpgradeRequired, errors.New("tracker version not supported"), startTime, r.URL.Path, 0)
		return
	}

	userUUID := uuid.GetUUID(req.UserUUID)
	tokenData, err := e.tokenizer.Parse(req.Token)
	if err != nil { // Starting the new one
		dice := byte(rand.Intn(100)) // [0, 100)
		// Use condition rate if it's set
		if req.Condition != "" {
			rate, err := e.conditions.GetRate(p.ProjectID, req.Condition, int(p.SampleRate))
			if err != nil {
				e.log.Warn(r.Context(), "can't get condition rate, condition: %s, err: %s", req.Condition, err)
			} else {
				p.SampleRate = byte(rate)
			}
		}
		// Sampling: reject the session when the dice roll misses the capture rate.
		if dice >= p.SampleRate {
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, fmt.Errorf("capture rate miss, rate: %d", p.SampleRate), startTime, r.URL.Path, 0)
			return
		}

		// The UA is used only as a validity check here; its parsed fields are
		// not stored for mobile sessions.
		ua := e.uaParser.ParseFromHTTPRequest(r)
		if ua == nil {
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, fmt.Errorf("browser not recognized, user-agent: %s", r.Header.Get("User-Agent")), startTime, r.URL.Path, 0)
			return
		}
		sessionID, err := e.flaker.Compose(uint64(startTime.UnixMilli()))
		if err != nil {
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
			return
		}
		expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
		// Positional TokenData literal: {ID, Delay, ExpTime}.
		tokenData = &token.TokenData{sessionID, 0, expTime.UnixMilli()}

		// Add sessionID to context
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID)))

		geoInfo := e.geoIP.ExtractGeoData(r)
		// Default to iOS metadata; override when the tracker reports another platform.
		deviceType, platform, os := ios.GetIOSDeviceType(req.UserDevice), "ios", "IOS"
		if req.Platform != "" && req.Platform != "ios" {
			deviceType = req.UserDeviceType
			platform = req.Platform
			os = "Android"
		}

		if !req.DoNotRecord {
			if err := e.sessions.Add(&sessions.Session{
				SessionID:            sessionID,
				Platform:             platform,
				Timestamp:            req.Timestamp,
				Timezone:             req.Timezone,
				ProjectID:            p.ProjectID,
				TrackerVersion:       req.TrackerVersion,
				RevID:                req.RevID,
				UserUUID:             userUUID,
				UserOS:               os,
				UserOSVersion:        req.UserOSVersion,
				UserDevice:           ios.MapIOSDevice(req.UserDevice),
				UserDeviceType:       deviceType,
				UserCountry:          geoInfo.Country,
				UserState:            geoInfo.State,
				UserCity:             geoInfo.City,
				UserDeviceMemorySize: req.DeviceMemory,
				UserDeviceHeapSize:   req.DeviceMemory,
				ScreenWidth:          req.Width,
				ScreenHeight:         req.Height,
			}); err != nil {
				// DB insert is best-effort; the queue message below still goes out.
				e.log.Warn(r.Context(), "failed to add mobile session to DB: %s", err)
			}

			sessStart := &messages.MobileSessionStart{
				Timestamp:      req.Timestamp,
				ProjectID:      uint64(p.ProjectID),
				TrackerVersion: req.TrackerVersion,
				RevID:          req.RevID,
				UserUUID:       userUUID,
				UserOS:         os,
				UserOSVersion:  req.UserOSVersion,
				UserDevice:     ios.MapIOSDevice(req.UserDevice),
				UserDeviceType: deviceType,
				UserCountry:    geoInfo.Pack(),
			}
			if err := e.producer.Produce(e.cfg.TopicRawMobile, tokenData.ID, sessStart.Encode()); err != nil {
				e.log.Error(r.Context(), "failed to send mobile sessionStart event to queue: %s", err)
			}
		}
	}

	e.responser.ResponseWithJSON(e.log, r.Context(), w, &StartMobileSessionResponse{
		Token:           e.tokenizer.Compose(*tokenData),
		UserUUID:        userUUID,
		SessionID:       strconv.FormatUint(tokenData.ID, 10),
		BeaconSizeLimit: e.cfg.BeaconSizeLimit,
		ImageQuality:    e.cfg.MobileQuality,
		FrameRate:       e.cfg.MobileFps,
		ProjectID:       strconv.FormatUint(uint64(p.ProjectID), 10),
		Features:        e.features,
	}, startTime, r.URL.Path, 0)
}
// pushMobileMessagesHandler handles POST /v1/mobile/i: it authorizes the
// session token and forwards the batch to the raw mobile topic.
func (e *handlersImpl) pushMobileMessagesHandler(w http.ResponseWriter, r *http.Request) {
	now := time.Now()
	session, err := e.tokenizer.ParseFromHTTPRequest(r)
	if session != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", session.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, now, r.URL.Path, 0)
		return
	}
	// Enrich the context with the session's project for logging/metrics.
	if info, lookupErr := e.sessions.Get(session.ID); lookupErr == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	e.pushMessages(w, r, session.ID, e.cfg.TopicRawMobile)
}
// pushMobileLateMessagesHandler handles POST /v1/mobile/late: like the live
// endpoint, but an expired session token is still accepted so buffered
// messages can be delivered after the session ended.
func (e *handlersImpl) pushMobileLateMessagesHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	// errors.Is instead of != so a wrapped EXPIRED sentinel still matches,
	// consistent with the web handler's JUST_EXPIRED check.
	// NOTE(review): assumes the tokenizer returns non-nil sessionData
	// alongside token.EXPIRED — verify in token.Tokenizer.
	if err != nil && !errors.Is(err, token.EXPIRED) {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
		return
	}
	// Check timestamps here?
	e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawMobile)
}
// mobileImagesUploadHandler handles POST /v1/mobile/images: it authorizes the
// session token, parses the multipart upload, and forwards each image part to
// the raw-images topic. Unreadable parts are skipped best-effort.
func (e *handlersImpl) mobileImagesUploadHandler(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	if r.Body == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
		return
	}
	// Cap the upload at the configured file size limit.
	r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
	defer r.Body.Close()

	err = r.ParseMultipartForm(5 * 1e6) // ~5Mb
	// errors.Is instead of == so wrapped sentinels still match.
	if errors.Is(err, http.ErrNotMultipart) || errors.Is(err, http.ErrMissingBoundary) {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
		return
	} else if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
		return
	}
	if r.MultipartForm == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, errors.New("multipart not parsed"), startTime, r.URL.Path, 0)
		return
	}
	if len(r.MultipartForm.Value["projectKey"]) == 0 {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter?
		return
	}
	for _, fileHeaderList := range r.MultipartForm.File {
		for _, fileHeader := range fileHeaderList {
			file, err := fileHeader.Open()
			if err != nil {
				// Best-effort: skip unreadable parts rather than failing the batch.
				continue
			}
			data, err := io.ReadAll(file)
			if err != nil {
				file.Close()
				e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
				return
			}
			file.Close()
			if err := e.producer.Produce(e.cfg.TopicRawImages, sessionData.ID, data); err != nil {
				e.log.Warn(r.Context(), "failed to send image to queue: %s", err)
			}
		}
	}
	e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, 0)
}
// pushMessages streams the (possibly gzip-compressed) request body into the
// given queue topic under the session ID. Reads are capped at the configured
// beacon size limit; success is answered with a plain 200.
func (e *handlersImpl) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
	start := time.Now()
	body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
	defer body.Close()

	// Transparently decompress gzip payloads; anything else is read as-is.
	var reader io.ReadCloser = body
	if r.Header.Get("Content-Encoding") == "gzip" {
		gzReader, err := gzip.NewReader(body)
		if err != nil {
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
			return
		}
		defer gzReader.Close()
		reader = gzReader
	}

	payload, err := io.ReadAll(reader)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
		return
	}
	if err := e.producer.Produce(topicName, sessionID, payload); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, start, r.URL.Path, 0)
		return
	}
	w.WriteHeader(http.StatusOK)
	e.log.Info(r.Context(), "response ok")
}

View file

@ -1,10 +1,4 @@
package router
type NotStartedRequest struct {
ProjectKey *string `json:"projectKey"`
TrackerVersion string `json:"trackerVersion"`
DoNotTrack bool `json:"DoNotTrack"`
}
package mobile
type StartMobileSessionRequest struct {
Token string `json:"token"`

View file

@ -0,0 +1,512 @@
package web
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
"github.com/Masterminds/semver"
httpCfg "openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/internal/http/uaparser"
"openreplay/backend/internal/http/util"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/conditions"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/projects"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/sessions"
beacons "openreplay/backend/pkg/sessions/api"
"openreplay/backend/pkg/token"
)
// handlersImpl bundles the dependencies of the web (browser) ingestion endpoints.
type handlersImpl struct {
	log             logger.Logger
	cfg             *httpCfg.Config
	responser       *api.Responser
	producer        types.Producer // raw message queue
	projects        projects.Projects
	sessions        sessions.Sessions
	uaParser        *uaparser.UAParser
	geoIP           geoip.GeoParser
	tokenizer       *token.Tokenizer
	conditions      conditions.Conditions
	flaker          *flakeid.Flaker      // session ID generator
	beaconSizeCache *beacons.BeaconCache // per-session beacon size limits
	features        map[string]bool      // feature toggles reported to the tracker
}
// NewHandlers wires the web ingestion endpoints with their dependencies:
// the queue producer, project/session stores, UA and geo parsers, tokenizer,
// capture conditions, session-ID generator and the beacon-size cache.
func NewHandlers(cfg *httpCfg.Config, log logger.Logger, responser *api.Responser, producer types.Producer, projects projects.Projects,
	sessions sessions.Sessions, uaParser *uaparser.UAParser, geoIP geoip.GeoParser, tokenizer *token.Tokenizer,
	conditions conditions.Conditions, flaker *flakeid.Flaker) (api.Handlers, error) {
	return &handlersImpl{
		log:        log,
		cfg:        cfg,
		responser:  responser,
		producer:   producer,
		projects:   projects,
		sessions:   sessions,
		uaParser:   uaParser,
		geoIP:      geoIP,
		tokenizer:  tokenizer,
		conditions: conditions,
		flaker:     flaker,
		// Per-session beacon limits, falling back to the configured default.
		beaconSizeCache: beacons.NewBeaconCache(cfg.BeaconSizeLimit),
		// Feature toggles reported back to the tracker on session start.
		features: map[string]bool{
			"feature-flags":  cfg.IsFeatureFlagEnabled,
			"usability-test": cfg.IsUsabilityTestEnabled,
		},
	}, nil
}
// GetAll lists the web ingestion routes served by this handler set.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/web/not-started", e.notStartedHandlerWeb, "POST"}, // sessions rejected before start
		{"/v1/web/start", e.startSessionHandlerWeb, "POST"},     // session handshake
		{"/v1/web/i", e.pushMessagesHandlerWeb, "POST"},         // message batches
		{"/v1/web/images", e.imagesUploaderHandlerWeb, "POST"},  // image uploads
	}
}
// getSessionTimestamp picks the session start timestamp. Offline sessions and
// old trackers keep the client-reported time; trackers >= 4.1.6 use the
// server clock, shifted back by a sane (< 5 min) client buffer delay.
func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) uint64 {
	ts := uint64(req.Timestamp)
	if req.IsOffline {
		return ts
	}
	constraint, err := semver.NewConstraint(">=4.1.6")
	if err != nil {
		return ts
	}
	// Drop the pre-release suffix, if any, before parsing.
	ver := req.TrackerVersion
	if idx := strings.Index(ver, "-"); idx != -1 {
		ver = ver[:idx]
	}
	parsed, err := semver.NewVersion(ver)
	if err != nil {
		return ts
	}
	if !constraint.Check(parsed) {
		return ts
	}
	ts = uint64(startTimeMili)
	if req.BufferDiff > 0 && req.BufferDiff < 5*60*1000 {
		ts -= req.BufferDiff
	}
	return ts
}
// startSessionHandlerWeb handles POST /v1/web/start: it validates the project
// and user agent, applies the capture (sampling) rate, creates a new session
// unless a valid token was presented (and no reset requested), optionally
// persists the session start and publishes it to the queue, and replies with
// the session token plus recording settings.
func (e *handlersImpl) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0

	// Check request body
	if r.Body == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	bodyBytes, err := api.ReadCompressedBody(e.log, w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)

	// Parse request body
	req := &StartSessionRequest{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}

	// Add tracker version to context
	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))

	// Handler's logic
	if req.ProjectKey == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize)
		return
	}

	p, err := e.projects.GetProjectByKey(*req.ProjectKey)
	if err != nil {
		if postgres.IsNoRowsErr(err) {
			logErr := fmt.Errorf("project doesn't exist or is not active, key: %s", *req.ProjectKey)
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, logErr, startTime, r.URL.Path, bodySize)
		} else {
			e.log.Error(r.Context(), "failed to get project by key: %s, err: %s", *req.ProjectKey, err)
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), startTime, r.URL.Path, bodySize)
		}
		return
	}

	// Add projectID to context
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))

	// Check if the project supports mobile sessions
	if !p.IsWeb() {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)
		return
	}

	ua := e.uaParser.ParseFromHTTPRequest(r)
	if ua == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, fmt.Errorf("browser not recognized, user-agent: %s", r.Header.Get("User-Agent")), startTime, r.URL.Path, bodySize)
		return
	}
	geoInfo := e.geoIP.ExtractGeoData(r)
	userUUID := uuid.GetUUID(req.UserUUID)
	tokenData, err := e.tokenizer.Parse(req.Token)
	if err != nil || req.Reset { // Starting the new one
		dice := byte(rand.Intn(100))
		// Use condition rate if it's set
		if req.Condition != "" {
			rate, err := e.conditions.GetRate(p.ProjectID, req.Condition, int(p.SampleRate))
			if err != nil {
				e.log.Warn(r.Context(), "can't get condition rate, condition: %s, err: %s", req.Condition, err)
			} else {
				p.SampleRate = byte(rate)
			}
		}
		// Sampling: reject the session when the dice roll misses the capture rate.
		if dice >= p.SampleRate {
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, fmt.Errorf("capture rate miss, rate: %d", p.SampleRate), startTime, r.URL.Path, bodySize)
			return
		}

		startTimeMili := startTime.UnixMilli()
		sessionID, err := e.flaker.Compose(uint64(startTimeMili))
		if err != nil {
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
			return
		}

		expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
		tokenData = &token.TokenData{
			ID:      sessionID,
			Delay:   startTimeMili - req.Timestamp, // client/server clock offset
			ExpTime: expTime.UnixMilli(),
		}

		// Add sessionID to context
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionID)))

		if recordSession(req) {
			sessionStart := &SessionStart{
				Timestamp:            getSessionTimestamp(req, startTimeMili),
				ProjectID:            uint64(p.ProjectID),
				TrackerVersion:       req.TrackerVersion,
				RevID:                req.RevID,
				UserUUID:             userUUID,
				UserAgent:            r.Header.Get("User-Agent"),
				UserOS:               ua.OS,
				UserOSVersion:        ua.OSVersion,
				UserBrowser:          ua.Browser,
				UserBrowserVersion:   ua.BrowserVersion,
				UserDevice:           ua.Device,
				UserDeviceType:       ua.DeviceType,
				UserCountry:          geoInfo.Pack(),
				UserDeviceMemorySize: req.DeviceMemory,
				UserDeviceHeapSize:   req.JsHeapSizeLimit,
				UserID:               req.UserID,
			}

			// Save sessionStart to db
			if err := e.sessions.Add(&sessions.Session{
				SessionID:            sessionID,
				Platform:             "web",
				Timestamp:            sessionStart.Timestamp,
				Timezone:             req.Timezone,
				ProjectID:            uint32(sessionStart.ProjectID),
				TrackerVersion:       sessionStart.TrackerVersion,
				RevID:                sessionStart.RevID,
				UserUUID:             sessionStart.UserUUID,
				UserOS:               sessionStart.UserOS,
				UserOSVersion:        sessionStart.UserOSVersion,
				UserDevice:           sessionStart.UserDevice,
				UserCountry:          geoInfo.Country,
				UserState:            geoInfo.State,
				UserCity:             geoInfo.City,
				UserAgent:            sessionStart.UserAgent,
				UserBrowser:          sessionStart.UserBrowser,
				UserBrowserVersion:   sessionStart.UserBrowserVersion,
				UserDeviceType:       sessionStart.UserDeviceType,
				UserDeviceMemorySize: sessionStart.UserDeviceMemorySize,
				UserDeviceHeapSize:   sessionStart.UserDeviceHeapSize,
				UserID:               &sessionStart.UserID,
				ScreenWidth:          req.Width,
				ScreenHeight:         req.Height,
			}); err != nil {
				// DB insert is best-effort; the queue message below still goes out.
				e.log.Warn(r.Context(), "can't insert sessionStart to DB: %s", err)
			}

			// Send sessionStart message to kafka
			if err := e.producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, sessionStart.Encode()); err != nil {
				e.log.Error(r.Context(), "can't send sessionStart to queue: %s", err)
			}
		}
	} else {
		// Valid token and no reset: the session continues; only record its ID.
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", tokenData.ID)))
	}

	// Save information about session beacon size
	e.beaconSizeCache.Add(tokenData.ID, p.BeaconSize)

	startResponse := &StartSessionResponse{
		Token:                e.tokenizer.Compose(*tokenData),
		UserUUID:             userUUID,
		UserOS:               ua.OS,
		UserDevice:           ua.Device,
		UserBrowser:          ua.Browser,
		UserCountry:          geoInfo.Country,
		UserState:            geoInfo.State,
		UserCity:             geoInfo.City,
		SessionID:            strconv.FormatUint(tokenData.ID, 10),
		ProjectID:            strconv.FormatUint(uint64(p.ProjectID), 10),
		BeaconSizeLimit:      e.beaconSizeCache.Get(tokenData.ID),
		CompressionThreshold: e.cfg.CompressionThreshold,
		StartTimestamp:       int64(flakeid.ExtractTimestamp(tokenData.ID)),
		Delay:                tokenData.Delay,
		CanvasEnabled:        e.cfg.RecordCanvas,
		CanvasImageQuality:   e.cfg.CanvasQuality,
		CanvasFrameRate:      e.cfg.CanvasFps,
		Features:             e.features,
	}
	// Allow request-specific adjustments before replying.
	modifyResponse(req, startResponse)
	e.responser.ResponseWithJSON(e.log, r.Context(), w, startResponse, startTime, r.URL.Path, bodySize)
}
// pushMessagesHandlerWeb handles POST /v1/web/i: it authorizes the session
// token (tolerating a just-expired one), reads the possibly-compressed batch
// within the session's beacon size limit, and forwards it to the raw web
// topic. After a just-expired token the reply is always 401 so the tracker
// re-authorizes — even when the batch itself was accepted.
func (e *handlersImpl) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0

	// Get debug header with batch info
	if batch := r.URL.Query().Get("batch"); batch != "" {
		r = r.WithContext(context.WithValue(r.Context(), "batch", batch))
	}
	// Check authorization
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	tokenJustExpired := false
	if err != nil {
		if errors.Is(err, token.JUST_EXPIRED) {
			tokenJustExpired = true
		} else {
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
			return
		}
	}
	// status picks the error code to report: once the token has just expired,
	// every failure is reported as 401. This replaces three identical copies
	// of the errCode-override snippet in the original.
	status := func(code int) int {
		if tokenJustExpired {
			return http.StatusUnauthorized
		}
		return code
	}
	// Add sessionID and projectID to context
	if info, err := e.sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	// Check request body
	if r.Body == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, status(http.StatusBadRequest), errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
		return
	}
	// The per-session beacon limit caps the decompressed batch size.
	bodyBytes, err := api.ReadCompressedBody(e.log, w, r, e.beaconSizeCache.Get(sessionData.ID))
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, status(http.StatusRequestEntityTooLarge), err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)

	// Send processed messages to queue as array of bytes
	err = e.producer.Produce(e.cfg.TopicRawWeb, sessionData.ID, bodyBytes)
	if err != nil {
		e.log.Error(r.Context(), "can't send messages batch to queue: %s", err)
		e.responser.ResponseWithError(e.log, r.Context(), w, status(http.StatusInternalServerError), errors.New("can't save message, try again"), startTime, r.URL.Path, bodySize)
		return
	}
	if tokenJustExpired {
		// The batch was accepted, but the tracker must refresh its token.
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, errors.New("token expired"), startTime, r.URL.Path, bodySize)
		return
	}
	e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, bodySize)
}
// notStartedHandlerWeb records a session that never began recording on the
// client (e.g. blocked or opted out). It validates the project key, extracts
// UA/geo details and stores an un-started session row; the tracker always
// receives 200 for a well-formed request, even if persistence fails.
func (e *handlersImpl) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	size := 0
	if r.Body == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), start, r.URL.Path, size)
		return
	}
	raw, err := api.ReadCompressedBody(e.log, w, r, e.cfg.JsonSizeLimit)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, start, r.URL.Path, size)
		return
	}
	size = len(raw)
	req := &NotStartedRequest{}
	if err := json.Unmarshal(raw, req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, start, r.URL.Path, size)
		return
	}
	// Expose tracker version to downstream logging via the request context.
	r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
	if req.ProjectKey == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("projectKey value required"), start, r.URL.Path, size)
		return
	}
	proj, err := e.projects.GetProjectByKey(*req.ProjectKey)
	if err != nil {
		if postgres.IsNoRowsErr(err) {
			notFound := fmt.Errorf("project doesn't exist or is not active, key: %s", *req.ProjectKey)
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, notFound, start, r.URL.Path, size)
		} else {
			e.log.Error(r.Context(), "can't find a project: %s", err)
			e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, errors.New("can't find a project"), start, r.URL.Path, size)
		}
		return
	}
	// Expose projectID to downstream logging via the request context.
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", proj.ProjectID)))
	agent := e.uaParser.ParseFromHTTPRequest(r)
	if agent == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, fmt.Errorf("browser not recognized, user-agent: %s", r.Header.Get("User-Agent")), start, r.URL.Path, size)
		return
	}
	geo := e.geoIP.ExtractGeoData(r)
	if err := e.sessions.AddUnStarted(&sessions.UnStartedSession{
		ProjectKey:         *req.ProjectKey,
		TrackerVersion:     req.TrackerVersion,
		DoNotTrack:         req.DoNotTrack,
		Platform:           "web",
		UserAgent:          r.Header.Get("User-Agent"),
		UserOS:             agent.OS,
		UserOSVersion:      agent.OSVersion,
		UserBrowser:        agent.Browser,
		UserBrowserVersion: agent.BrowserVersion,
		UserDevice:         agent.Device,
		UserDeviceType:     agent.DeviceType,
		UserCountry:        geo.Country,
		UserState:          geo.State,
		UserCity:           geo.City,
	}); err != nil {
		e.log.Warn(r.Context(), "can't insert un-started session: %s", err)
	}
	// The tracker cannot react to a failure here, so always report success.
	e.responser.ResponseOK(e.log, r.Context(), w, start, r.URL.Path, size)
}
// ScreenshotMessage is the queue payload for a single uploaded canvas
// screenshot: the sanitized file name plus the raw image bytes.
type ScreenshotMessage struct {
	Name string // sanitized upload filename (via util.SafeString)
	Data []byte // raw file content as uploaded
}
// imagesUploaderHandlerWeb receives canvas screenshots as a multipart form,
// authorizes the upload via the session token, and forwards each file to the
// canvas-images queue topic. Per-file marshal/produce failures are logged and
// skipped; read failures abort the request with 500.
func (e *handlersImpl) imagesUploaderHandlerWeb(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil { // Should accept expired token?
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
		return
	}
	// Add sessionID and projectID to context (best effort — a cache miss is not fatal)
	if info, err := e.sessions.Get(sessionData.ID); err == nil {
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	if r.Body == nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
		return
	}
	r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
	defer r.Body.Close()
	// Parse the multipart form
	err = r.ParseMultipartForm(10 << 20) // Max upload size 10 MB
	// Use errors.Is for sentinel errors: MaxBytesReader et al. may wrap them.
	if errors.Is(err, http.ErrNotMultipart) || errors.Is(err, http.ErrMissingBoundary) {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
		return
	} else if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
		return
	}
	// Iterate over uploaded files
	for _, fileHeaderList := range r.MultipartForm.File {
		for _, fileHeader := range fileHeaderList {
			file, err := fileHeader.Open()
			if err != nil {
				e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
				return
			}
			// Per-file helper owns closing the file (was duplicated Close calls).
			if err := e.queueScreenshot(r.Context(), sessionData.ID, util.SafeString(fileHeader.Filename), file); err != nil {
				e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
				return
			}
		}
	}
	e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, 0)
}

// queueScreenshot reads one uploaded screenshot and publishes it to the
// canvas-images topic. It always closes file. A read failure is returned to
// the caller (fatal for the request); marshal/produce failures are logged and
// swallowed, preserving the original best-effort semantics.
func (e *handlersImpl) queueScreenshot(ctx context.Context, sessionID uint64, name string, file io.ReadCloser) error {
	defer file.Close()
	// Read the file content
	fileBytes, err := io.ReadAll(file)
	if err != nil {
		return err
	}
	// Create a message to send to Kafka
	msg := ScreenshotMessage{
		Name: name,
		Data: fileBytes,
	}
	data, err := json.Marshal(&msg)
	if err != nil {
		e.log.Warn(ctx, "can't marshal screenshot message, err: %s", err)
		return nil // skip this file, keep processing the rest
	}
	// Send the message to queue
	if err := e.producer.Produce(e.cfg.TopicCanvasImages, sessionID, data); err != nil {
		e.log.Warn(ctx, "can't send screenshot message to queue, err: %s", err)
	}
	return nil
}

View file

@ -1,4 +1,10 @@
package router
package web
// NotStartedRequest is the tracker payload reporting a session that did not
// start recording on the client.
// NOTE(review): the "DoNotTrack" JSON tag is capitalized, unlike its
// camelCase siblings — presumably the established wire format; confirm
// before normalizing.
type NotStartedRequest struct {
	ProjectKey     *string `json:"projectKey"`     // project key; nil means missing and is rejected by the handler
	TrackerVersion string  `json:"trackerVersion"` // tracker library version string
	DoNotTrack     bool    `json:"DoNotTrack"`     // client-side do-not-track flag
}
type StartSessionRequest struct {
Token string `json:"token"`

View file

@ -2,12 +2,10 @@ package api
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"strings"
@ -15,59 +13,106 @@ import (
"github.com/gorilla/mux"
metrics "openreplay/backend/pkg/metrics/spot"
spotConfig "openreplay/backend/internal/config/spot"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/spot/auth"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/keys"
"openreplay/backend/pkg/server/user"
"openreplay/backend/pkg/spot/service"
"openreplay/backend/pkg/spot/transcoder"
)
func (e *Router) createSpot(w http.ResponseWriter, r *http.Request) {
// handlersImpl implements api.Handlers for the spot service, bundling the
// dependencies every route handler needs.
type handlersImpl struct {
	log           logger.Logger             // structured, context-aware logger
	responser     *api.Responser            // shared JSON/error response writer
	jsonSizeLimit int64                     // max accepted JSON request body size, bytes
	spots         service.Spots             // spot CRUD/status service
	objStorage    objectstorage.ObjectStorage // pre-signed URL + upload backend
	transcoder    transcoder.Transcoder     // video transcoding / stream playlist provider
	keys          keys.Keys                 // public-key management for spot sharing
}
// NewHandlers builds the spot service HTTP handler set from its dependencies.
// Only cfg.JsonSizeLimit is retained from the config. The error return is
// always nil here; the signature matches the api.Handlers constructor shape.
func NewHandlers(log logger.Logger, cfg *spotConfig.Config, responser *api.Responser, spots service.Spots, objStore objectstorage.ObjectStorage, transcoder transcoder.Transcoder, keys keys.Keys) (api.Handlers, error) {
	return &handlersImpl{
		log:           log,
		responser:     responser,
		jsonSizeLimit: cfg.JsonSizeLimit,
		spots:         spots,
		objStorage:    objStore,
		transcoder:    transcoder,
		keys:          keys,
	}, nil
}
// GetAll returns the route table for the spot service: path, handler and
// allowed HTTP method for every exposed endpoint.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/spots", e.createSpot, "POST"},
		{"/v1/spots/{id}", e.getSpot, "GET"},
		{"/v1/spots/{id}", e.updateSpot, "PATCH"},
		{"/v1/spots", e.getSpots, "GET"},
		{"/v1/spots", e.deleteSpots, "DELETE"},
		{"/v1/spots/{id}/comment", e.addComment, "POST"},
		{"/v1/spots/{id}/uploaded", e.uploadedSpot, "POST"},
		{"/v1/spots/{id}/video", e.getSpotVideo, "GET"},
		{"/v1/spots/{id}/public-key", e.getPublicKey, "GET"},
		{"/v1/spots/{id}/public-key", e.updatePublicKey, "PATCH"},
		{"/v1/spots/{id}/status", e.spotStatus, "GET"},
		{"/v1/ping", e.ping, "GET"},
	}
}
// ping is a liveness probe endpoint; it always answers 200 OK with no body.
func (e *handlersImpl) ping(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}
func (e *handlersImpl) createSpot(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)
req := &CreateSpotRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
// Creat a spot
currUser := r.Context().Value("userData").(*auth.User)
newSpot, err := e.services.Spots.Add(currUser, req.Name, req.Comment, req.Duration, req.Crop)
currUser := r.Context().Value("userData").(*user.User)
newSpot, err := e.spots.Add(currUser, req.Name, req.Comment, req.Duration, req.Crop)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
// Parse and upload preview image
previewImage, err := getSpotPreview(req.Preview)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
previewName := fmt.Sprintf("%d/preview.jpeg", newSpot.ID)
if err = e.services.ObjStorage.Upload(bytes.NewReader(previewImage), previewName, "image/jpeg", objectstorage.NoCompression); err != nil {
if err = e.objStorage.Upload(bytes.NewReader(previewImage), previewName, "image/jpeg", objectstorage.NoCompression); err != nil {
e.log.Error(r.Context(), "can't upload preview image: %s", err)
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, errors.New("can't upload preview image"), startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, errors.New("can't upload preview image"), startTime, r.URL.Path, bodySize)
return
}
mobURL, err := e.getUploadMobURL(newSpot.ID)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
videoURL, err := e.getUploadVideoURL(newSpot.ID)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
@ -76,7 +121,7 @@ func (e *Router) createSpot(w http.ResponseWriter, r *http.Request) {
MobURL: mobURL,
VideoURL: videoURL,
}
e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
func getSpotPreview(preview string) ([]byte, error) {
@ -93,18 +138,18 @@ func getSpotPreview(preview string) ([]byte, error) {
return data, nil
}
func (e *Router) getUploadMobURL(spotID uint64) (string, error) {
func (e *handlersImpl) getUploadMobURL(spotID uint64) (string, error) {
mobKey := fmt.Sprintf("%d/events.mob", spotID)
mobURL, err := e.services.ObjStorage.GetPreSignedUploadUrl(mobKey)
mobURL, err := e.objStorage.GetPreSignedUploadUrl(mobKey)
if err != nil {
return "", fmt.Errorf("can't get mob URL: %s", err)
}
return mobURL, nil
}
func (e *Router) getUploadVideoURL(spotID uint64) (string, error) {
func (e *handlersImpl) getUploadVideoURL(spotID uint64) (string, error) {
mobKey := fmt.Sprintf("%d/video.webm", spotID)
mobURL, err := e.services.ObjStorage.GetPreSignedUploadUrl(mobKey)
mobURL, err := e.objStorage.GetPreSignedUploadUrl(mobKey)
if err != nil {
return "", fmt.Errorf("can't get video URL: %s", err)
}
@ -143,51 +188,51 @@ func getSpotsRequest(r *http.Request) (*GetSpotsRequest, error) {
return req, nil
}
func (e *Router) getPreviewURL(spotID uint64) (string, error) {
func (e *handlersImpl) getPreviewURL(spotID uint64) (string, error) {
previewKey := fmt.Sprintf("%d/preview.jpeg", spotID)
previewURL, err := e.services.ObjStorage.GetPreSignedDownloadUrl(previewKey)
previewURL, err := e.objStorage.GetPreSignedDownloadUrl(previewKey)
if err != nil {
return "", fmt.Errorf("can't get preview URL: %s", err)
}
return previewURL, nil
}
func (e *Router) getMobURL(spotID uint64) (string, error) {
func (e *handlersImpl) getMobURL(spotID uint64) (string, error) {
mobKey := fmt.Sprintf("%d/events.mob", spotID)
mobURL, err := e.services.ObjStorage.GetPreSignedDownloadUrl(mobKey)
mobURL, err := e.objStorage.GetPreSignedDownloadUrl(mobKey)
if err != nil {
return "", fmt.Errorf("can't get mob URL: %s", err)
}
return mobURL, nil
}
func (e *Router) getVideoURL(spotID uint64) (string, error) {
func (e *handlersImpl) getVideoURL(spotID uint64) (string, error) {
mobKey := fmt.Sprintf("%d/video.webm", spotID) // TODO: later return url to m3u8 file
mobURL, err := e.services.ObjStorage.GetPreSignedDownloadUrl(mobKey)
mobURL, err := e.objStorage.GetPreSignedDownloadUrl(mobKey)
if err != nil {
return "", fmt.Errorf("can't get video URL: %s", err)
}
return mobURL, nil
}
func (e *Router) getSpot(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) getSpot(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
res, err := e.services.Spots.GetByID(user, id)
user := r.Context().Value("userData").(*user.User)
res, err := e.spots.GetByID(user, id)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
if res == nil {
e.ResponseWithError(r.Context(), w, http.StatusNotFound, fmt.Errorf("spot not found"), startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, fmt.Errorf("spot not found"), startTime, r.URL.Path, bodySize)
return
}
@ -197,12 +242,12 @@ func (e *Router) getSpot(w http.ResponseWriter, r *http.Request) {
}
mobURL, err := e.getMobURL(id)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
videoURL, err := e.getVideoURL(id)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
@ -216,60 +261,60 @@ func (e *Router) getSpot(w http.ResponseWriter, r *http.Request) {
MobURL: mobURL,
VideoURL: videoURL,
}
playlist, err := e.services.Transcoder.GetSpotStreamPlaylist(id)
playlist, err := e.transcoder.GetSpotStreamPlaylist(id)
if err != nil {
e.log.Warn(r.Context(), "can't get stream playlist: %s", err)
} else {
spotInfo.StreamFile = base64.StdEncoding.EncodeToString(playlist)
}
e.ResponseWithJSON(r.Context(), w, &GetSpotResponse{Spot: spotInfo}, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, &GetSpotResponse{Spot: spotInfo}, startTime, r.URL.Path, bodySize)
}
func (e *Router) updateSpot(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) updateSpot(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)
req := &UpdateSpotRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
_, err = e.services.Spots.UpdateName(user, id, req.Name)
user := r.Context().Value("userData").(*user.User)
_, err = e.spots.UpdateName(user, id, req.Name)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, bodySize)
}
func (e *Router) getSpots(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) getSpots(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
req, err := getSpotsRequest(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
user := r.Context().Value("userData").(*user.User)
opts := &service.GetOpts{
NameFilter: req.Query, Order: req.Order, Page: req.Page, Limit: req.Limit}
switch req.FilterBy {
@ -278,9 +323,9 @@ func (e *Router) getSpots(w http.ResponseWriter, r *http.Request) {
default:
opts.TenantID = user.TenantID
}
spots, total, tenantHasSpots, err := e.services.Spots.Get(user, opts)
spots, total, tenantHasSpots, err := e.spots.Get(user, opts)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
res := make([]ShortInfo, 0, len(spots))
@ -298,82 +343,82 @@ func (e *Router) getSpots(w http.ResponseWriter, r *http.Request) {
PreviewURL: previewUrl,
})
}
e.ResponseWithJSON(r.Context(), w, &GetSpotsResponse{Spots: res, Total: total, TenantHasSpots: tenantHasSpots}, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, &GetSpotsResponse{Spots: res, Total: total, TenantHasSpots: tenantHasSpots}, startTime, r.URL.Path, bodySize)
}
func (e *Router) deleteSpots(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) deleteSpots(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)
req := &DeleteSpotRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
spotsToDelete := make([]uint64, 0, len(req.SpotIDs))
for _, idStr := range req.SpotIDs {
id, err := strconv.ParseUint(idStr, 10, 64)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, fmt.Errorf("invalid spot id: %s", idStr), startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, fmt.Errorf("invalid spot id: %s", idStr), startTime, r.URL.Path, bodySize)
return
}
spotsToDelete = append(spotsToDelete, id)
}
user := r.Context().Value("userData").(*auth.User)
if err := e.services.Spots.Delete(user, spotsToDelete); err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
user := r.Context().Value("userData").(*user.User)
if err := e.spots.Delete(user, spotsToDelete); err != nil {
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, bodySize)
}
func (e *Router) addComment(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) addComment(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)
req := &AddCommentRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
updatedSpot, err := e.services.Spots.AddComment(user, id, &service.Comment{UserName: req.UserName, Text: req.Comment})
user := r.Context().Value("userData").(*user.User)
updatedSpot, err := e.spots.AddComment(user, id, &service.Comment{UserName: req.UserName, Text: req.Comment})
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
mobURL, err := e.getMobURL(id)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
videoURL, err := e.getVideoURL(id)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
@ -385,70 +430,70 @@ func (e *Router) addComment(w http.ResponseWriter, r *http.Request) {
MobURL: mobURL,
VideoURL: videoURL,
}
e.ResponseWithJSON(r.Context(), w, &GetSpotResponse{Spot: spotInfo}, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, &GetSpotResponse{Spot: spotInfo}, startTime, r.URL.Path, bodySize)
}
func (e *Router) uploadedSpot(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) uploadedSpot(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
spot, err := e.services.Spots.GetByID(user, id) // check if spot exists
user := r.Context().Value("userData").(*user.User)
spot, err := e.spots.GetByID(user, id) // check if spot exists
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
e.log.Info(r.Context(), "uploaded spot %+v, from user: %+v", spot, user)
if err := e.services.Transcoder.Process(spot); err != nil {
if err := e.transcoder.Process(spot); err != nil {
e.log.Error(r.Context(), "can't add transcoding task: %s", err)
}
e.ResponseOK(r.Context(), w, startTime, r.URL.Path, bodySize)
e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, bodySize)
}
func (e *Router) getSpotVideo(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) getSpotVideo(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
key := fmt.Sprintf("%d/video.webm", id)
videoURL, err := e.services.ObjStorage.GetPreSignedDownloadUrl(key)
videoURL, err := e.objStorage.GetPreSignedDownloadUrl(key)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
resp := map[string]interface{}{
"url": videoURL,
}
e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
func (e *Router) getSpotStream(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) getSpotStream(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
// Example data to serve as the file content
streamPlaylist, err := e.services.Transcoder.GetSpotStreamPlaylist(id)
streamPlaylist, err := e.transcoder.GetSpotStreamPlaylist(id)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
@ -462,144 +507,90 @@ func (e *Router) getSpotStream(w http.ResponseWriter, r *http.Request) {
// Write the content of the buffer to the response writer
if _, err := buffer.WriteTo(w); err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
}
func (e *Router) getPublicKey(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) getPublicKey(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
key, err := e.services.Keys.Get(id, user)
user := r.Context().Value("userData").(*user.User)
key, err := e.keys.Get(id, user)
if err != nil {
if strings.Contains(err.Error(), "not found") {
e.ResponseWithError(r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
} else {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
}
return
}
resp := map[string]interface{}{
"key": key,
}
e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
func (e *Router) updatePublicKey(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) updatePublicKey(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)
req := &UpdateSpotPublicKeyRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
key, err := e.services.Keys.Set(id, req.Expiration, user)
user := r.Context().Value("userData").(*user.User)
key, err := e.keys.Set(id, req.Expiration, user)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
resp := map[string]interface{}{
"key": key,
}
e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
func (e *Router) spotStatus(w http.ResponseWriter, r *http.Request) {
func (e *handlersImpl) spotStatus(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0
id, err := getSpotID(r)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}
user := r.Context().Value("userData").(*auth.User)
status, err := e.services.Spots.GetStatus(user, id)
user := r.Context().Value("userData").(*user.User)
status, err := e.spots.GetStatus(user, id)
if err != nil {
e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
resp := map[string]interface{}{
"status": status,
}
e.ResponseWithJSON(r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}
// recordMetrics updates the shared HTTP metrics for a finished request:
// body size (only when a body was read), total request counter, and
// request duration in milliseconds.
func recordMetrics(requestStart time.Time, url string, code, bodySize int) {
	if bodySize > 0 {
		metrics.RecordRequestSize(float64(bodySize), url, code)
	}
	metrics.IncreaseTotalRequests()
	// time.Since is the idiomatic equivalent of time.Now().Sub(requestStart).
	metrics.RecordRequestDuration(float64(time.Since(requestStart).Milliseconds()), url, code)
}
// readBody reads the request body while enforcing the given size limit via
// http.MaxBytesReader, and always closes the wrapped reader afterwards.
// A failure to close is only logged; a failure to read is returned.
func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
	limited := http.MaxBytesReader(w, r.Body, limit)
	data, readErr := io.ReadAll(limited)
	if closeErr := limited.Close(); closeErr != nil {
		e.log.Warn(r.Context(), "error while closing request body: %s", closeErr)
	}
	if readErr != nil {
		return nil, readErr
	}
	return data, nil
}
// ResponseOK writes a bare 200 OK, logs the outcome and records request metrics.
func (e *Router) ResponseOK(ctx context.Context, w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
	w.WriteHeader(http.StatusOK)
	e.log.Info(ctx, "response ok")
	recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// ResponseWithJSON marshals res and writes it as an application/json body
// with the implicit 200 status, then records request metrics.
// NOTE(review): if marshaling fails the error is only logged and an empty
// body is still written with 200 — confirm this is intentional.
func (e *Router) ResponseWithJSON(ctx context.Context, w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
	e.log.Info(ctx, "response ok")
	body, err := json.Marshal(res)
	if err != nil {
		e.log.Error(ctx, "can't marshal response: %s", err)
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(body)
	recordMetrics(requestStart, url, http.StatusOK, bodySize)
}
// response is the JSON envelope used for error replies.
type response struct {
	Error string `json:"error"`
}
func (e *Router) ResponseWithError(ctx context.Context, w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
e.log.Error(ctx, "response error, code: %d, error: %s", code, err)
body, err := json.Marshal(&response{err.Error()})
if err != nil {
e.log.Error(ctx, "can't marshal response: %s", err)
}
w.WriteHeader(code)
w.Write(body)
recordMetrics(requestStart, url, code, bodySize)
e.responser.ResponseWithJSON(e.log, r.Context(), w, resp, startTime, r.URL.Path, bodySize)
}

View file

@ -1,213 +0,0 @@
package api
import (
"bytes"
"fmt"
"io"
"net/http"
"openreplay/backend/pkg/spot"
"openreplay/backend/pkg/spot/auth"
"sync"
"time"
"github.com/docker/distribution/context"
"github.com/gorilla/mux"
spotConfig "openreplay/backend/internal/config/spot"
"openreplay/backend/internal/http/util"
"openreplay/backend/pkg/logger"
)
// Router is the HTTP entry point of the spot service: it owns the mux
// router, the shared service builder and a per-user rate limiter.
type Router struct {
	log      logger.Logger
	cfg      *spotConfig.Config
	router   *mux.Router
	mutex    *sync.RWMutex
	services *spot.ServicesBuilder
	limiter  *UserRateLimiter
}

// NewRouter validates its dependencies, constructs a Router and registers
// all routes and middlewares via init.
func NewRouter(cfg *spotConfig.Config, log logger.Logger, services *spot.ServicesBuilder) (*Router, error) {
	switch {
	case cfg == nil:
		return nil, fmt.Errorf("config is empty")
	case services == nil:
		return nil, fmt.Errorf("services is empty")
	case log == nil:
		return nil, fmt.Errorf("logger is empty")
	}
	e := &Router{
		log:      log,
		cfg:      cfg,
		mutex:    &sync.RWMutex{},
		services: services,
		// Rate-limiter window parameters — see NewUserRateLimiter for their semantics.
		limiter: NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
	}
	e.init()
	return e, nil
}
// init builds the mux router: the root health check, the spot
// CRUD/comment/key/status routes, and the middleware chain (CORS, auth,
// rate limiting, audit logging — applied in registration order).
func (e *Router) init() {
	e.router = mux.NewRouter()
	// Root route
	e.router.HandleFunc("/", e.ping)
	// Spot routes
	e.router.HandleFunc("/v1/spots", e.createSpot).Methods("POST", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}", e.getSpot).Methods("GET", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}", e.updateSpot).Methods("PATCH", "OPTIONS")
	e.router.HandleFunc("/v1/spots", e.getSpots).Methods("GET", "OPTIONS")
	e.router.HandleFunc("/v1/spots", e.deleteSpots).Methods("DELETE", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}/comment", e.addComment).Methods("POST", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}/uploaded", e.uploadedSpot).Methods("POST", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}/video", e.getSpotVideo).Methods("GET", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}/public-key", e.getPublicKey).Methods("GET", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}/public-key", e.updatePublicKey).Methods("PATCH", "OPTIONS")
	e.router.HandleFunc("/v1/spots/{id}/status", e.spotStatus).Methods("GET", "OPTIONS")
	e.router.HandleFunc("/v1/ping", e.ping).Methods("GET", "OPTIONS")
	// CORS middleware
	e.router.Use(e.corsMiddleware)
	e.router.Use(e.authMiddleware)
	e.router.Use(e.rateLimitMiddleware)
	e.router.Use(e.actionMiddleware)
}
// ping is a trivial liveness endpoint: it always replies 200 OK.
func (e *Router) ping(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}
// corsMiddleware sets CORS headers (when enabled in config), short-circuits
// preflight OPTIONS requests, and stores the method/URL in the request
// context for downstream logging.
func (e *Router) corsMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" {
			// Root health check: serve it once and stop. Without the return
			// the request fell through and was served a second time below.
			next.ServeHTTP(w, r)
			return
		}
		if e.cfg.UseAccessControlHeaders {
			// Prepare headers for preflight requests
			w.Header().Set("Access-Control-Allow-Origin", "*")
			w.Header().Set("Access-Control-Allow-Methods", "POST,GET,PATCH,DELETE")
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization,Content-Encoding")
		}
		if r.Method == http.MethodOptions {
			w.Header().Set("Cache-Control", "max-age=86400")
			w.WriteHeader(http.StatusOK)
			return
		}
		r = r.WithContext(context.WithValues(r.Context(), map[string]interface{}{"httpMethod": r.Method, "url": util.SafeString(r.URL.Path)}))
		next.ServeHTTP(w, r)
	})
}
// authMiddleware authorizes every request (except the root health check)
// either via the Authorization JWT or, for the small allow-list of
// spot-sharing endpoints (see isSpotWithKeyRequest), via a public key in
// the "key" query parameter. The resolved user is stored in the request
// context under "userData".
func (e *Router) authMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" {
			// Health check is unauthenticated: serve once and stop. Without
			// the return the request fell through and was served twice.
			next.ServeHTTP(w, r)
			return
		}
		isExtension := false
		pathTemplate, err := mux.CurrentRoute(r).GetPathTemplate()
		if err != nil {
			e.log.Error(r.Context(), "failed to get path template: %s", err)
		} else {
			// These endpoints are called by the browser extension and are
			// authorized with a different (spot) token.
			if pathTemplate == "/v1/ping" ||
				(pathTemplate == "/v1/spots" && r.Method == "POST") ||
				(pathTemplate == "/v1/spots/{id}/uploaded" && r.Method == "POST") {
				isExtension = true
			}
		}
		// Check if the request is authorized
		user, err := e.services.Auth.IsAuthorized(r.Header.Get("Authorization"), getPermissions(r.URL.Path), isExtension)
		if err != nil {
			e.log.Warn(r.Context(), "Unauthorized request: %s", err)
			if !isSpotWithKeyRequest(r) {
				w.WriteHeader(http.StatusUnauthorized)
				return
			}
			// Fall back to public-key authorization for shared spots.
			user, err = e.services.Keys.IsValid(r.URL.Query().Get("key"))
			if err != nil {
				e.log.Warn(r.Context(), "Wrong public key: %s", err)
				w.WriteHeader(http.StatusUnauthorized)
				return
			}
		}
		r = r.WithContext(context.WithValues(r.Context(), map[string]interface{}{"userData": user}))
		next.ServeHTTP(w, r)
	})
}
// isSpotWithKeyRequest reports whether the request targets one of the
// endpoints that may alternatively be authorized with a public spot key:
// fetching a spot, commenting on it, or polling its processing status.
func isSpotWithKeyRequest(r *http.Request) bool {
	tmpl, err := mux.CurrentRoute(r).GetPathTemplate()
	if err != nil {
		return false
	}
	switch {
	case tmpl == "/v1/spots/{id}" && r.Method == "GET": // get spot
		return true
	case tmpl == "/v1/spots/{id}/comment" && r.Method == "POST": // add comment
		return true
	case tmpl == "/v1/spots/{id}/status" && r.Method == "GET": // spot status
		return true
	}
	return false
}
// rateLimitMiddleware enforces a per-user rate limit; the unauthenticated
// root health check bypasses it. Must run after authMiddleware, which
// stores the user in the request context.
func (e *Router) rateLimitMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" {
			// Serve the health check once and stop. Without the return the
			// request fell through to the userData type assertion below,
			// which panics because auth does not set userData for "/",
			// and the handler would also run twice.
			next.ServeHTTP(w, r)
			return
		}
		user := r.Context().Value("userData").(*auth.User)
		rl := e.limiter.GetRateLimiter(user.ID)
		if !rl.Allow() {
			http.Error(w, "Too Many Requests", http.StatusTooManyRequests)
			return
		}
		next.ServeHTTP(w, r)
	})
}
// statusWriter wraps http.ResponseWriter to remember the status code a
// handler wrote, so it can be logged after the request is served.
type statusWriter struct {
	http.ResponseWriter
	statusCode int
}

// WriteHeader records the code and forwards it to the wrapped writer.
func (w *statusWriter) WriteHeader(statusCode int) {
	w.statusCode = statusCode
	w.ResponseWriter.WriteHeader(statusCode)
}

// Write forwards to the wrapped writer, first recording the implicit 200
// if WriteHeader was never called.
func (w *statusWriter) Write(b []byte) (int, error) {
	if w.statusCode == 0 {
		w.statusCode = http.StatusOK // Default status code is 200
	}
	return w.ResponseWriter.Write(b)
}
// actionMiddleware buffers the request body (restoring it for the next
// handler) and captures the final response status so the request can be
// logged for the audit trail.
func (e *Router) actionMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" {
			// Serve the health check once and stop. Without the return the
			// request fell through and was served a second time below.
			next.ServeHTTP(w, r)
			return
		}
		// Read body and restore the io.ReadCloser to its original state
		bodyBytes, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "can't read body", http.StatusBadRequest)
			return
		}
		r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
		// Use custom response writer to get the status code
		sw := &statusWriter{ResponseWriter: w}
		// Serve the request
		next.ServeHTTP(sw, r)
		e.logRequest(r, bodyBytes, sw.statusCode)
	})
}
// GetHandler exposes the configured mux router as an http.Handler.
func (e *Router) GetHandler() http.Handler {
	return e.router
}

View file

@ -1,7 +0,0 @@
package api
import (
"net/http"
)
func (e *Router) logRequest(r *http.Request, bodyBytes []byte, statusCode int) {}

View file

@ -1,39 +1,53 @@
package spot
import (
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server/tracer"
"time"
"openreplay/backend/internal/config/spot"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/spot/auth"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/server/auth"
"openreplay/backend/pkg/server/keys"
"openreplay/backend/pkg/server/limiter"
spotAPI "openreplay/backend/pkg/spot/api"
"openreplay/backend/pkg/spot/service"
"openreplay/backend/pkg/spot/transcoder"
)
type ServicesBuilder struct {
Flaker *flakeid.Flaker
ObjStorage objectstorage.ObjectStorage
Auth auth.Auth
Spots service.Spots
Keys service.Keys
Transcoder transcoder.Transcoder
Auth auth.Auth
RateLimiter *limiter.UserRateLimiter
AuditTrail tracer.Tracer
SpotsAPI api.Handlers
}
func NewServiceBuilder(log logger.Logger, cfg *spot.Config, pgconn pool.Pool) (*ServicesBuilder, error) {
func NewServiceBuilder(log logger.Logger, cfg *spot.Config, webMetrics web.Web, pgconn pool.Pool) (*ServicesBuilder, error) {
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
return nil, err
}
flaker := flakeid.NewFlaker(cfg.WorkerID)
spots := service.NewSpots(log, pgconn, flaker)
transcoder := transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots)
keys := keys.NewKeys(log, pgconn)
auditrail, err := tracer.NewTracer(log, pgconn)
if err != nil {
return nil, err
}
responser := api.NewResponser(webMetrics)
handlers, err := spotAPI.NewHandlers(log, cfg, responser, spots, objStore, transcoder, keys)
if err != nil {
return nil, err
}
return &ServicesBuilder{
Flaker: flaker,
ObjStorage: objStore,
Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn),
Spots: spots,
Keys: service.NewKeys(log, pgconn),
Transcoder: transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots),
Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn, keys),
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
AuditTrail: auditrail,
SpotsAPI: handlers,
}, nil
}

View file

@ -4,12 +4,12 @@ import (
"context"
"encoding/json"
"fmt"
"openreplay/backend/pkg/server/user"
"time"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/spot/auth"
)
const MaxNameLength = 64
@ -58,14 +58,14 @@ type Update struct {
}
type Spots interface {
Add(user *auth.User, name, comment string, duration int, crop []int) (*Spot, error)
GetByID(user *auth.User, spotID uint64) (*Spot, error)
Get(user *auth.User, opts *GetOpts) ([]*Spot, uint64, bool, error)
UpdateName(user *auth.User, spotID uint64, newName string) (*Spot, error)
AddComment(user *auth.User, spotID uint64, comment *Comment) (*Spot, error)
Delete(user *auth.User, spotIds []uint64) error
Add(user *user.User, name, comment string, duration int, crop []int) (*Spot, error)
GetByID(user *user.User, spotID uint64) (*Spot, error)
Get(user *user.User, opts *GetOpts) ([]*Spot, uint64, bool, error)
UpdateName(user *user.User, spotID uint64, newName string) (*Spot, error)
AddComment(user *user.User, spotID uint64, comment *Comment) (*Spot, error)
Delete(user *user.User, spotIds []uint64) error
SetStatus(spotID uint64, status string) error
GetStatus(user *auth.User, spotID uint64) (string, error)
GetStatus(user *user.User, spotID uint64) (string, error)
}
func NewSpots(log logger.Logger, pgconn pool.Pool, flaker *flakeid.Flaker) Spots {
@ -76,7 +76,7 @@ func NewSpots(log logger.Logger, pgconn pool.Pool, flaker *flakeid.Flaker) Spots
}
}
func (s *spotsImpl) Add(user *auth.User, name, comment string, duration int, crop []int) (*Spot, error) {
func (s *spotsImpl) Add(user *user.User, name, comment string, duration int, crop []int) (*Spot, error) {
switch {
case user == nil:
return nil, fmt.Errorf("user is required")
@ -142,7 +142,7 @@ func (s *spotsImpl) add(spot *Spot) error {
return nil
}
func (s *spotsImpl) GetByID(user *auth.User, spotID uint64) (*Spot, error) {
func (s *spotsImpl) GetByID(user *user.User, spotID uint64) (*Spot, error) {
switch {
case user == nil:
return nil, fmt.Errorf("user is required")
@ -152,7 +152,7 @@ func (s *spotsImpl) GetByID(user *auth.User, spotID uint64) (*Spot, error) {
return s.getByID(spotID, user)
}
func (s *spotsImpl) getByID(spotID uint64, user *auth.User) (*Spot, error) {
func (s *spotsImpl) getByID(spotID uint64, user *user.User) (*Spot, error) {
sql := `SELECT s.name, u.email, s.duration, s.crop, s.comments, s.created_at
FROM spots.spots s
JOIN public.users u ON s.user_id = u.user_id
@ -176,7 +176,7 @@ func (s *spotsImpl) getByID(spotID uint64, user *auth.User) (*Spot, error) {
return spot, nil
}
func (s *spotsImpl) Get(user *auth.User, opts *GetOpts) ([]*Spot, uint64, bool, error) {
func (s *spotsImpl) Get(user *user.User, opts *GetOpts) ([]*Spot, uint64, bool, error) {
switch {
case user == nil:
return nil, 0, false, fmt.Errorf("user is required")
@ -200,7 +200,7 @@ func (s *spotsImpl) Get(user *auth.User, opts *GetOpts) ([]*Spot, uint64, bool,
return s.getAll(user, opts)
}
func (s *spotsImpl) getAll(user *auth.User, opts *GetOpts) ([]*Spot, uint64, bool, error) {
func (s *spotsImpl) getAll(user *user.User, opts *GetOpts) ([]*Spot, uint64, bool, error) {
sql := `SELECT COUNT(1) OVER () AS total, s.spot_id, s.name, u.email, s.duration, s.created_at
FROM spots.spots s
JOIN public.users u ON s.user_id = u.user_id
@ -261,7 +261,7 @@ func (s *spotsImpl) doesTenantHasSpots(tenantID uint64) bool {
return count > 0
}
func (s *spotsImpl) UpdateName(user *auth.User, spotID uint64, newName string) (*Spot, error) {
func (s *spotsImpl) UpdateName(user *user.User, spotID uint64, newName string) (*Spot, error) {
switch {
case user == nil:
return nil, fmt.Errorf("user is required")
@ -276,7 +276,7 @@ func (s *spotsImpl) UpdateName(user *auth.User, spotID uint64, newName string) (
return s.updateName(spotID, newName, user)
}
func (s *spotsImpl) updateName(spotID uint64, newName string, user *auth.User) (*Spot, error) {
func (s *spotsImpl) updateName(spotID uint64, newName string, user *user.User) (*Spot, error) {
sql := `WITH updated AS (
UPDATE spots.spots SET name = $1, updated_at = $2
WHERE spot_id = $3 AND tenant_id = $4 AND deleted_at IS NULL RETURNING *)
@ -291,7 +291,7 @@ func (s *spotsImpl) updateName(spotID uint64, newName string, user *auth.User) (
return &Spot{ID: spotID, Name: newName}, nil
}
func (s *spotsImpl) AddComment(user *auth.User, spotID uint64, comment *Comment) (*Spot, error) {
func (s *spotsImpl) AddComment(user *user.User, spotID uint64, comment *Comment) (*Spot, error) {
switch {
case user == nil:
return nil, fmt.Errorf("user is required")
@ -311,7 +311,7 @@ func (s *spotsImpl) AddComment(user *auth.User, spotID uint64, comment *Comment)
return s.addComment(spotID, comment, user)
}
func (s *spotsImpl) addComment(spotID uint64, newComment *Comment, user *auth.User) (*Spot, error) {
func (s *spotsImpl) addComment(spotID uint64, newComment *Comment, user *user.User) (*Spot, error) {
sql := `WITH updated AS (
UPDATE spots.spots
SET comments = array_append(comments, $1), updated_at = $2
@ -332,7 +332,7 @@ func (s *spotsImpl) addComment(spotID uint64, newComment *Comment, user *auth.Us
return &Spot{ID: spotID}, nil
}
func (s *spotsImpl) Delete(user *auth.User, spotIds []uint64) error {
func (s *spotsImpl) Delete(user *user.User, spotIds []uint64) error {
switch {
case user == nil:
return fmt.Errorf("user is required")
@ -342,7 +342,7 @@ func (s *spotsImpl) Delete(user *auth.User, spotIds []uint64) error {
return s.deleteSpots(spotIds, user)
}
func (s *spotsImpl) deleteSpots(spotIds []uint64, user *auth.User) error {
func (s *spotsImpl) deleteSpots(spotIds []uint64, user *user.User) error {
sql := `WITH updated AS (UPDATE spots.spots SET deleted_at = NOW() WHERE tenant_id = $1 AND spot_id IN (`
args := []interface{}{user.TenantID}
for i, spotID := range spotIds {
@ -378,7 +378,7 @@ func (s *spotsImpl) SetStatus(spotID uint64, status string) error {
return nil
}
func (s *spotsImpl) GetStatus(user *auth.User, spotID uint64) (string, error) {
func (s *spotsImpl) GetStatus(user *user.User, spotID uint64) (string, error) {
switch {
case user == nil:
return "", fmt.Errorf("user is required")

View file

@ -0,0 +1,73 @@
package api
import (
"context"
"fmt"
"net/http"
"time"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/tags"
"openreplay/backend/pkg/token"
)
// handlersImpl serves the public tags API for recorded sessions.
type handlersImpl struct {
	log       logger.Logger
	responser *api.Responser
	tokenizer *token.Tokenizer
	sessions  sessions.Sessions
	tags      tags.Tags
}

// NewHandlers wires the tags HTTP handlers with their dependencies.
// NOTE(review): dependencies are not nil-checked here — confirm callers
// always pass non-nil values.
func NewHandlers(log logger.Logger, responser *api.Responser, tokenizer *token.Tokenizer, sessions sessions.Sessions, tags tags.Tags) (api.Handlers, error) {
	return &handlersImpl{
		log:       log,
		responser: responser,
		tokenizer: tokenizer,
		sessions:  sessions,
		tags:      tags,
	}, nil
}
// GetAll lists the route descriptions (path, handler, method) that the
// tags module registers on the shared router.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/tags", e.getTags, "GET"},
	}
}
// getTags returns the tag definitions of the project that owns the session
// identified by the request's auth token.
func (e *handlersImpl) getTags(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// TODO: move check authorization into middleware (we gonna have 2 different auth middlewares)
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		// Expose sessionID to downstream logging even when auth fails below.
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	sessInfo, err := e.sessions.Get(sessionData.ID)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add projectID to context (sessionID was already added above, so the
	// second, redundant sessionID write has been removed).
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", sessInfo.ProjectID)))
	// Get tags
	tags, err := e.tags.Get(sessInfo.ProjectID)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	type UrlResponse struct {
		Tags interface{} `json:"tags"`
	}
	e.responser.ResponseWithJSON(e.log, r.Context(), w, &UrlResponse{Tags: tags}, startTime, r.URL.Path, bodySize)
}

View file

@ -0,0 +1,211 @@
package api
import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"time"
"github.com/gorilla/mux"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/token"
"openreplay/backend/pkg/uxtesting"
)
// handlersImpl serves the UX-testing endpoints (test/task signals, test
// info, webcam upload URL) for the trackers.
type handlersImpl struct {
	log           logger.Logger
	responser     *api.Responser
	jsonSizeLimit int64
	tokenizer     *token.Tokenizer
	sessions      sessions.Sessions
	uxTesting     uxtesting.UXTesting
	objStorage    objectstorage.ObjectStorage
}

// NewHandlers wires the UX-testing HTTP handlers with their dependencies;
// jsonSizeLimit caps the accepted request-body size in bytes.
func NewHandlers(log logger.Logger, responser *api.Responser, jsonSizeLimit int64, tokenizer *token.Tokenizer, sessions sessions.Sessions,
	uxTesting uxtesting.UXTesting, objStorage objectstorage.ObjectStorage) (api.Handlers, error) {
	return &handlersImpl{
		log:           log,
		responser:     responser,
		jsonSizeLimit: jsonSizeLimit,
		tokenizer:     tokenizer,
		sessions:      sessions,
		uxTesting:     uxTesting,
		objStorage:    objStorage,
	}, nil
}
// GetAll lists the route descriptions (path, handler, method) that the
// UX-testing module registers on the shared router.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/web/uxt/signals/test", e.sendUXTestSignal, "POST"},
		{"/v1/web/uxt/signals/task", e.sendUXTaskSignal, "POST"},
		{"/v1/web/uxt/test/{id}", e.getUXTestInfo, "GET"},
		{"/v1/web/uxt/upload-url", e.getUXUploadUrl, "GET"},
	}
}
// getUXTestInfo returns the UX-test description for the {id} route
// variable, after authorizing the session token and verifying the test
// belongs to the same project as the session.
func (e *handlersImpl) getUXTestInfo(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		// Expose sessionID to downstream logging even when auth fails below.
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	sess, err := e.sessions.Get(sessionData.ID)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add projectID to context
	r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", sess.ProjectID)))
	// Get taskID
	vars := mux.Vars(r)
	id := vars["id"]
	// Get task info
	info, err := e.uxTesting.GetInfo(id)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	// A test from another project must not be readable with this session.
	if sess.ProjectID != info.ProjectID {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("project mismatch"), startTime, r.URL.Path, bodySize)
		return
	}
	type TaskInfoResponse struct {
		Task *uxtesting.UXTestInfo `json:"test"`
	}
	e.responser.ResponseWithJSON(e.log, r.Context(), w, &TaskInfoResponse{Task: info}, startTime, r.URL.Path, bodySize)
}
// sendUXTestSignal stores a test-level signal for the session identified
// by the request's auth token.
func (e *handlersImpl) sendUXTestSignal(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.sessions.Get(sessionData.ID); err == nil {
		// Best effort: a failed lookup only loses the projectID log field.
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)
	// Parse request body
	req := &uxtesting.TestSignal{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	// The session id always comes from the token, never from the payload.
	req.SessionID = sessionData.ID
	// Save test signal
	if err := e.uxTesting.SetTestSignal(req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, bodySize)
}
// sendUXTaskSignal stores a task-level signal for the session identified
// by the request's auth token (same flow as sendUXTestSignal, but for
// uxtesting.TaskSignal).
func (e *handlersImpl) sendUXTaskSignal(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.sessions.Get(sessionData.ID); err == nil {
		// Best effort: a failed lookup only loses the projectID log field.
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
		return
	}
	bodySize = len(bodyBytes)
	// Parse request body
	req := &uxtesting.TaskSignal{}
	if err := json.Unmarshal(bodyBytes, req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	// The session id always comes from the token, never from the payload.
	req.SessionID = sessionData.ID
	// Save test signal
	if err := e.uxTesting.SetTaskSignal(req); err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	e.responser.ResponseOK(e.log, r.Context(), w, startTime, r.URL.Path, bodySize)
}
// getUXUploadUrl returns a pre-signed object-storage upload URL for the
// session's webcam recording ("<sessionID>/ux_webcam_record.webm").
func (e *handlersImpl) getUXUploadUrl(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	sessionData, err := e.tokenizer.ParseFromHTTPRequest(r)
	if sessionData != nil {
		r = r.WithContext(context.WithValue(r.Context(), "sessionID", fmt.Sprintf("%d", sessionData.ID)))
	}
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Add sessionID and projectID to context
	if info, err := e.sessions.Get(sessionData.ID); err == nil {
		// Best effort: a failed lookup only loses the projectID log field.
		r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", info.ProjectID)))
	}
	key := fmt.Sprintf("%d/ux_webcam_record.webm", sessionData.ID)
	url, err := e.objStorage.GetPreSignedUploadUrl(key)
	if err != nil {
		e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	type UrlResponse struct {
		URL string `json:"url"`
	}
	e.responser.ResponseWithJSON(e.log, r.Context(), w, &UrlResponse{URL: url}, startTime, r.URL.Path, bodySize)
}

View file

@ -58,6 +58,7 @@ def get_event_type(event_type: Union[schemas.EventType, schemas.PerformanceEvent
schemas.EventType.REQUEST: "REQUEST",
schemas.EventType.REQUEST_DETAILS: "REQUEST",
schemas.PerformanceEventType.FETCH_FAILED: "REQUEST",
schemas.GraphqlFilterType.GRAPHQL_NAME: "GRAPHQL",
schemas.EventType.STATE_ACTION: "STATEACTION",
schemas.EventType.ERROR: "ERROR",
schemas.PerformanceEventType.LOCATION_AVG_CPU_LOAD: 'PERFORMANCE',

View file

@ -1,4 +1,5 @@
#!/bin/sh
export TZ=UTC
sh env_vars.sh
source /tmp/.env.override

View file

@ -1,4 +1,5 @@
#!/bin/sh
export TZ=UTC
export ASSIST_KEY=ignore
sh env_vars.sh
source /tmp/.env.override

View file

@ -1,4 +1,5 @@
#!/bin/sh
export TZ=UTC
export ASSIST_KEY=ignore
sh env_vars.sh
source /tmp/.env.override

View file

@ -1,37 +0,0 @@
package router
import (
"github.com/gorilla/mux"
"net/http"
"strconv"
"time"
)
// getConditions returns the recording conditions configured for the
// {project} route variable; the caller must present a valid session token.
func (e *Router) getConditions(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	_, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Parse the {project} route variable into a numeric project id.
	vars := mux.Vars(r)
	projID := vars["project"]
	projectID, err := strconv.Atoi(projID)
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Fetch the project's conditions.
	info, err := e.services.Conditions.Get(uint32(projectID))
	if err != nil {
		e.ResponseWithError(r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	e.ResponseWithJSON(r.Context(), w, info, startTime, r.URL.Path, bodySize)
}

View file

@ -0,0 +1,64 @@
package api
import (
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"openreplay/backend/pkg/conditions"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/server/api"
"openreplay/backend/pkg/token"
)
// handlersImpl serves the recording-conditions endpoints for the web and
// mobile trackers.
type handlersImpl struct {
	log        logger.Logger
	tokenizer  *token.Tokenizer
	conditions conditions.Conditions
}

// NewHandlers wires the conditions HTTP handlers with their dependencies.
func NewHandlers(log logger.Logger, tokenizer *token.Tokenizer, conditions conditions.Conditions) (api.Handlers, error) {
	return &handlersImpl{
		log:        log,
		tokenizer:  tokenizer,
		conditions: conditions,
	}, nil
}
// GetAll lists the route descriptions (path, handler, method) that the
// conditions module registers on the shared router.
func (e *handlersImpl) GetAll() []*api.Description {
	return []*api.Description{
		{"/v1/web/conditions/{project}", e.getConditions, "GET"},
		{"/v1/mobile/conditions/{project}", e.getConditions, "GET"},
	}
}
// getConditions returns the recording conditions for the {project} route
// variable; the caller must present a valid session token.
// NOTE(review): this handler uses the package-level api.ResponseWith*
// helpers rather than a *api.Responser like sibling handler modules —
// confirm this inconsistency is intentional (no metrics recorder here?).
func (e *handlersImpl) getConditions(w http.ResponseWriter, r *http.Request) {
	startTime := time.Now()
	bodySize := 0
	// Check authorization
	_, err := e.tokenizer.ParseFromHTTPRequest(r)
	if err != nil {
		api.ResponseWithError(e.log, r.Context(), w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Parse the {project} route variable into a numeric project id.
	vars := mux.Vars(r)
	projID := vars["project"]
	projectID, err := strconv.Atoi(projID)
	if err != nil {
		api.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
		return
	}
	// Fetch the project's conditions.
	info, err := e.conditions.Get(uint32(projectID))
	if err != nil {
		api.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
		return
	}
	api.ResponseWithJSON(e.log, r.Context(), w, info, startTime, r.URL.Path, bodySize)
}

View file

@ -0,0 +1,96 @@
package tracer
import (
"bytes"
"encoding/json"
"io"
"net/http"
"github.com/gorilla/mux"
"openreplay/backend/pkg/server/user"
)
type statusWriter struct {
http.ResponseWriter
statusCode int
}
func (w *statusWriter) WriteHeader(statusCode int) {
w.statusCode = statusCode
w.ResponseWriter.WriteHeader(statusCode)
}
func (w *statusWriter) Write(b []byte) (int, error) {
if w.statusCode == 0 {
w.statusCode = http.StatusOK
}
return w.ResponseWriter.Write(b)
}
// Middleware buffers the request body (restoring it for the next handler)
// and wraps the ResponseWriter so the final status code can be passed to
// logRequest after the request is served.
func (t *tracerImpl) Middleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Read body and restore the io.ReadCloser to its original state
		bodyBytes, err := io.ReadAll(r.Body)
		if err != nil {
			http.Error(w, "can't read body", http.StatusBadRequest)
			return
		}
		r.Body = io.NopCloser(bytes.NewBuffer(bodyBytes))
		// Use custom response writer to get the status code
		sw := &statusWriter{ResponseWriter: w}
		// Serve the request
		next.ServeHTTP(sw, r)
		t.logRequest(r, bodyBytes, sw.statusCode)
	})
}
// routeMatch maps method+path-template to the audit action name stored
// with the trace; requests that match no key are not traced.
var routeMatch = map[string]string{
	"POST" + "/spot/v1/spots":                   "createSpot",
	"GET" + "/spot/v1/spots/{id}":               "getSpot",
	"PATCH" + "/spot/v1/spots/{id}":             "updateSpot",
	"GET" + "/spot/v1/spots":                    "getSpots",
	"DELETE" + "/spot/v1/spots":                 "deleteSpots",
	"POST" + "/spot/v1/spots/{id}/comment":      "addComment",
	"GET" + "/spot/v1/spots/{id}/video":         "getSpotVideo",
	"PATCH" + "/spot/v1/spots/{id}/public-key":  "updatePublicKey",
}
// logRequest records an audit-trail entry for the routes listed in
// routeMatch: the matched action name, query parameters (as JSON), buffered
// payload and final status code.
func (t *tracerImpl) logRequest(r *http.Request, bodyBytes []byte, statusCode int) {
	pathTemplate, err := mux.CurrentRoute(r).GetPathTemplate()
	if err != nil {
		t.log.Error(r.Context(), "failed to get path template: %s", err)
	}
	t.log.Debug(r.Context(), "path template: %s", pathTemplate)
	if _, ok := routeMatch[r.Method+pathTemplate]; !ok {
		t.log.Debug(r.Context(), "no match for route: %s %s", r.Method, pathTemplate)
		return
	}
	// Convert the parameters to json
	query := r.URL.Query()
	params := make(map[string]interface{})
	for key, values := range query {
		if len(values) > 1 {
			params[key] = values
		} else {
			params[key] = values[0]
		}
	}
	jsonData, err := json.Marshal(params)
	if err != nil {
		t.log.Error(r.Context(), "failed to marshal query parameters: %s", err)
	}
	requestData := &RequestData{
		Action:     routeMatch[r.Method+pathTemplate],
		Method:     r.Method,
		PathFormat: pathTemplate,
		Endpoint:   r.URL.Path,
		Payload:    bodyBytes,
		Parameters: jsonData,
		Status:     statusCode,
	}
	// A missing or differently-typed "userData" context value previously
	// caused a panic here; use a checked assertion and skip tracing instead.
	userData, ok := r.Context().Value("userData").(*user.User)
	if !ok {
		t.log.Error(r.Context(), "no user data in request context, trace skipped")
		return
	}
	// trace returns an error that was previously ignored; surface it.
	if err := t.trace(userData, requestData); err != nil {
		t.log.Error(r.Context(), "failed to trace request: %s", err)
	}
	// DEBUG
	t.log.Debug(r.Context(), "request data: %v", requestData)
}

View file

@ -0,0 +1,106 @@
package tracer
import (
"context"
"errors"
"net/http"
"openreplay/backend/pkg/db/postgres"
db "openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/pool"
"openreplay/backend/pkg/server/user"
)
// Tracer records incoming HTTP requests (who performed which action)
// for auditing purposes.
type Tracer interface {
	// Middleware wraps next and captures each request for tracing.
	Middleware(next http.Handler) http.Handler
	// Close flushes any buffered traces and stops background workers.
	Close() error
}
// tracerImpl is the default Tracer, backed by a Postgres bulk writer and
// a worker pool that persists traces off the request path.
type tracerImpl struct {
	log    logger.Logger   // structured logger
	conn   db.Pool         // Postgres connection pool
	traces postgres.Bulk   // batched INSERT into the traces table
	saver  pool.WorkerPool // async workers feeding sendTraces
}
// NewTracer builds a Tracer that batches traces into Postgres and writes
// them from a background worker pool. Both arguments are mandatory.
func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
	if log == nil {
		return nil, errors.New("logger is required")
	}
	if conn == nil {
		return nil, errors.New("connection is required")
	}
	t := &tracerImpl{log: log, conn: conn}
	if err := t.initBulk(); err != nil {
		return nil, err
	}
	// A single worker with a queue of 200 pending tasks keeps writes ordered.
	t.saver = pool.NewPool(1, 200, t.sendTraces)
	return t, nil
}
// initBulk prepares the batched INSERT used to persist traces: 10 columns
// per row, flushed in groups of up to 50 rows.
func (t *tracerImpl) initBulk() error {
	bulk, err := postgres.NewBulk(t.conn,
		"traces",
		"(user_id, tenant_id, auth, action, method, path_format, endpoint, payload, parameters, status)",
		"($%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d, $%d)",
		10, 50)
	if err != nil {
		return err
	}
	t.traces = bulk
	return nil
}
// Task is one unit of work for the trace writer: the acting user's
// identity plus the captured request data to persist.
type Task struct {
	UserID   *uint64      // acting user's ID (nullable column in the DB)
	TenantID uint64       // tenant the user belongs to
	Auth     *string      // authentication method used (nullable column)
	Data     *RequestData // captured request details
}
// sendTraces is the worker-pool callback: it appends one Task to the bulk
// INSERT buffer, which flushes to Postgres once full.
func (t *tracerImpl) sendTraces(payload interface{}) {
	// Guard the assertion: a panic here would take down the worker goroutine.
	rec, ok := payload.(*Task)
	if !ok {
		t.log.Error(context.Background(), "unexpected trace payload type: %T", payload)
		return
	}
	t.log.Debug(context.Background(), "Sending traces, %v", rec)
	if err := t.traces.Append(rec.UserID, rec.TenantID, rec.Auth, rec.Data.Action, rec.Data.Method, rec.Data.PathFormat,
		rec.Data.Endpoint, rec.Data.Payload, rec.Data.Parameters, rec.Data.Status); err != nil {
		t.log.Error(context.Background(), "can't append trace: %s", err)
	}
}
// RequestData captures the trace-relevant parts of one HTTP request.
type RequestData struct {
	Action     string // logical action name resolved from the route table
	Method     string // HTTP method
	PathFormat string // route path template (e.g. /spot/v1/spots/{id})
	Endpoint   string // concrete request path
	Payload    []byte // raw request body
	Parameters []byte // query parameters marshaled as JSON
	Status     int    // response status code
}
// trace validates its inputs and enqueues a Task for asynchronous
// persistence. It returns an error only when an argument is nil.
func (t *tracerImpl) trace(user *user.User, data *RequestData) error {
	if user == nil {
		return errors.New("user is required")
	}
	if data == nil {
		return errors.New("request is required")
	}
	task := &Task{
		UserID:   &user.ID,
		TenantID: user.TenantID,
		Auth:     &user.AuthMethod,
		Data:     data,
	}
	t.saver.Submit(task)
	return nil
}
// Close stops the background workers and flushes any traces still
// buffered in the bulk writer.
func (t *tracerImpl) Close() error {
	t.saver.Stop()
	return t.traces.Send()
}

View file

@ -1,4 +1,10 @@
package router
package web
// NotStartedRequest is the payload reported when a tracker does not
// start a session (e.g. missing project key or do-not-track enabled).
type NotStartedRequest struct {
	ProjectKey     *string `json:"projectKey"`     // nullable project key sent by the tracker
	TrackerVersion string  `json:"trackerVersion"` // tracker library version
	// NOTE(review): tag is PascalCase unlike the other fields — presumably
	// it matches the tracker's wire format; confirm before normalizing.
	DoNotTrack bool `json:"DoNotTrack"`
}
type StartSessionRequest struct {
Token string `json:"token"`

View file

@ -1,8 +1,5 @@
import React from 'react';
import withPageTitle from 'HOCs/withPageTitle';
import NoSessionsMessage from 'Shared/NoSessionsMessage';
import MainSearchBar from 'Shared/MainSearchBar';
import SearchActions from 'Shared/SearchActions';
import SessionsTabOverview from 'Shared/SessionsTabOverview/SessionsTabOverview';
import FFlagsList from 'Components/FFlags';
import NewFFlag from 'Components/FFlags/NewFFlag';
@ -12,7 +9,7 @@ import { withRouter, RouteComponentProps, useLocation } from 'react-router-dom';
import FlagView from 'Components/FFlags/FlagView/FlagView';
import { observer } from 'mobx-react-lite';
import { useStore } from '@/mstore';
import NotesRoute from "../shared/SessionsTabOverview/components/Notes/NotesRoute";
import NotesList from 'Shared/SessionsTabOverview/components/Notes/NoteList';
// @ts-ignore
interface IProps extends RouteComponentProps {
@ -33,23 +30,19 @@ function Overview({ match: { params } }: IProps) {
React.useEffect(() => {
searchStore.setActiveTab(tab);
}, [tab]);
return (
<Switch>
<Route
exact
strict
path={[withSiteId(sessions(), siteId), withSiteId(bookmarks(), siteId)]}
>
<Route exact strict
path={[withSiteId(sessions(), siteId), withSiteId(bookmarks(), siteId)]}>
<div className="mb-5 w-full mx-auto" style={{ maxWidth: '1360px' }}>
<NoSessionsMessage siteId={siteId} />
<SearchActions />
<MainSearchBar />
<div className="my-4" />
<SessionsTabOverview />
</div>
</Route>
<Route exact strict path={withSiteId(notes(), siteId)}>
<NotesRoute />
<div className="mb-5 w-full mx-auto" style={{ maxWidth: '1360px' }}>
<NotesList />
</div>
</Route>
<Route exact strict path={withSiteId(fflags(), siteId)}>
<FFlagsList siteId={siteId} />

View file

@ -23,9 +23,9 @@ const lineLength = 40;
function WSPanel({ socketMsgList, onClose }: Props) {
const [query, setQuery] = React.useState('');
const [list, setList] = React.useState(socketMsgList);
const [selectedRow, setSelectedRow] = React.useState<SocketMsg | null>(null);
const [selectedRow, setSelectedRow] = React.useState<{ msg: SocketMsg, id: number } | null>(null);
const onQueryChange = (e) => {
const onQueryChange = (e: any) => {
setQuery(e.target.value);
const newList = filterList(socketMsgList, e.target.value, [
'data',
@ -69,15 +69,16 @@ function WSPanel({ socketMsgList, onClose }: Props) {
position: 'relative',
}}
>
{list.map((msg) => (
{list.map((msg, i) => (
<Row
msg={msg}
key={msg.timestamp}
onSelect={() => setSelectedRow(msg)}
onSelect={() => setSelectedRow({ msg, id: i })}
isSelected={selectedRow ? selectedRow.id === i : false}
/>
))}
{selectedRow ? (
<SelectedRow msg={selectedRow} onClose={() => setSelectedRow(null)} />
<SelectedRow msg={selectedRow.msg} onClose={() => setSelectedRow(null)} />
) : null}
</div>
</div>
@ -127,7 +128,7 @@ function MsgDirection({ dir }: { dir: 'up' | 'down' }) {
);
}
function Row({ msg, onSelect }: { msg: SocketMsg; onSelect: () => void }) {
function Row({ msg, onSelect, isSelected }: { msg: SocketMsg; isSelected: boolean; onSelect: () => void }) {
return (
<>
<div
@ -149,7 +150,7 @@ function Row({ msg, onSelect }: { msg: SocketMsg; onSelect: () => void }) {
'rounded-full font-bold text-xl p-2 bg-white w-6 h-6 flex items-center justify-center'
}
>
<span>{isOpen ? '-' : '+'}</span>
<span>{isSelected ? '-' : '+'}</span>
</div>
) : null}
</div>

View file

@ -6,6 +6,9 @@ import LatestSessionsMessage from './components/LatestSessionsMessage';
import SessionHeader from './components/SessionHeader';
import SessionList from './components/SessionList';
import { observer } from 'mobx-react-lite';
import NoSessionsMessage from 'Shared/NoSessionsMessage/NoSessionsMessage';
import MainSearchBar from 'Shared/MainSearchBar/MainSearchBar';
import SearchActions from "../SearchActions";
function SessionsTabOverview() {
const [query, setQuery] = React.useState('');
@ -23,21 +26,27 @@ function SessionsTabOverview() {
const testingKey = localStorage.getItem('__mauricio_testing_access') === 'true';
return (
<div className="widget-wrapper">
{testingKey ? (
<Input
value={query}
onKeyDown={handleKeyDown}
onChange={(e) => setQuery(e.target.value)}
className={'mb-2'}
placeholder={'ask session ai'}
/>
) : null}
<SessionHeader />
<div className="border-b" />
<LatestSessionsMessage />
<SessionList />
</div>
<>
<NoSessionsMessage />
<SearchActions />
<MainSearchBar />
<div className="my-4" />
<div className="widget-wrapper">
{testingKey ? (
<Input
value={query}
onKeyDown={handleKeyDown}
onChange={(e) => setQuery(e.target.value)}
className={'mb-2'}
placeholder={'ask session ai'}
/>
) : null}
<SessionHeader />
<div className="border-b" />
<LatestSessionsMessage />
<SessionList />
</div>
</>
);
}

View file

@ -7,11 +7,16 @@ import { observer } from 'mobx-react-lite';
function LatestSessionsMessage() {
const { searchStore } = useStore();
const count = searchStore.latestList.size;
const onShowNewSessions = () => {
void searchStore.updateCurrentPage(1, true);
};
return count > 0 ? (
<div
className="bg-amber-50 p-1 flex w-full border-b text-center justify-center link"
style={{ backgroundColor: 'rgb(255 251 235)' }}
onClick={() => searchStore.updateCurrentPage(1)}
onClick={onShowNewSessions}
>
Show {numberWithCommas(count)} New {count > 1 ? 'Sessions' : 'Session'}
</div>

View file

@ -5,57 +5,70 @@ import NoteItem from './NoteItem';
import { observer } from 'mobx-react-lite';
import { useStore } from 'App/mstore';
import AnimatedSVG, { ICONS } from 'Shared/AnimatedSVG/AnimatedSVG';
import NoteTags from 'Shared/SessionsTabOverview/components/Notes/NoteTags';
function NotesList() {
const { notesStore } = useStore();
React.useEffect(() => {
void notesStore.fetchNotes();
void notesStore.fetchNotes();
}, [notesStore.page]);
const list = notesStore.notes;
return (
<Loader loading={notesStore.loading}>
<NoContent
show={list.length === 0}
title={
<div className="flex flex-col items-center justify-center">
{/* <Icon name="no-dashboard" size={80} color="figmaColors-accent-secondary" /> */}
<AnimatedSVG name={ICONS.NO_NOTES} size={60} />
<div className="text-center mt-4 text-lg font-medium">No notes yet</div>
</div>
}
subtext={
<div className="text-center flex justify-center items-center flex-col">
Note observations during session replays and share them with your team.
</div>
}
>
<div className="border-b rounded bg-white">
{list.map((note) => (
<React.Fragment key={note.noteId}>
<NoteItem note={note} />
</React.Fragment>
))}
</div>
<>
<div className="widget-wrapper">
<div className="flex items-center px-4 py-1 justify-between w-full">
<h2 className="text-2xl capitalize mr-4">Notes</h2>
<div className="w-full flex items-center justify-between py-4 px-6">
<div className="text-disabled-text">
Showing{' '}
<span className="font-semibold">{Math.min(list.length, notesStore.pageSize)}</span> out
of <span className="font-semibold">{notesStore.total}</span> notes
<div className="flex items-center justify-end w-full">
<NoteTags />
</div>
<Pagination
page={notesStore.page}
total={notesStore.total}
onPageChange={(page) => notesStore.changePage(page)}
limit={notesStore.pageSize}
debounceRequest={100}
/>
</div>
</NoContent>
</Loader>
<div className="border-b" />
<Loader loading={notesStore.loading}>
<NoContent
show={list.length === 0}
title={
<div className="flex flex-col items-center justify-center">
{/* <Icon name="no-dashboard" size={80} color="figmaColors-accent-secondary" /> */}
<AnimatedSVG name={ICONS.NO_NOTES} size={60} />
<div className="text-center mt-4 text-lg font-medium">No notes yet</div>
</div>
}
subtext={
<div className="text-center flex justify-center items-center flex-col">
Note observations during session replays and share them with your team.
</div>
}
>
<div className="border-b rounded bg-white">
{list.map((note) => (
<React.Fragment key={note.noteId}>
<NoteItem note={note} />
</React.Fragment>
))}
</div>
<div className="w-full flex items-center justify-between py-4 px-6">
<div className="text-disabled-text">
Showing{' '}
<span className="font-semibold">{Math.min(list.length, notesStore.pageSize)}</span> out
of <span className="font-semibold">{notesStore.total}</span> notes
</div>
<Pagination
page={notesStore.page}
total={notesStore.total}
onPageChange={(page) => notesStore.changePage(page)}
limit={notesStore.pageSize}
debounceRequest={100}
/>
</div>
</NoContent>
</Loader>
</div>
</>
);
}

View file

@ -2,7 +2,6 @@ import React, { useMemo } from 'react';
import Period from 'Types/app/period';
import SelectDateRange from 'Shared/SelectDateRange';
import SessionTags from '../SessionTags';
import NoteTags from '../Notes/NoteTags';
import SessionSort from '../SessionSort';
import { Space } from 'antd';
import { useStore } from 'App/mstore';
@ -32,18 +31,15 @@ function SessionHeader() {
return (
<div className="flex items-center px-4 py-1 justify-between w-full">
<h2 className="text-2xl capitalize mr-4">{title}</h2>
<div className="flex items-center w-full justify-end">
{activeTab.type !== 'bookmarks' && (
<>
<SessionTags />
<div className="mr-auto" />
<Space>
<SelectDateRange isAnt period={period} onChange={onDateChange} right={true} />
<SessionSort />
</Space>
</>
)}
</div>
<div className="flex items-center w-full justify-end">
{activeTab.type !== 'bookmarks' && <SessionTags />}
<div className="mr-auto" />
<Space>
{activeTab.type !== 'bookmarks' &&
<SelectDateRange isAnt period={period} onChange={onDateChange} right={true} />}
<SessionSort />
</Space>
</div>
</div>
);
}

View file

@ -73,7 +73,6 @@ function SessionList() {
}, [isBookmark, isVault, activeTab, location.pathname]);
const [statusData, setStatusData] = React.useState<SessionStatus>({ status: 0, count: 0 });
const fetchStatus = async () => {
const response = await sessionService.getRecordingStatus();
setStatusData({

View file

@ -2,6 +2,8 @@
import { DateTime, Duration } from 'luxon'; // TODO
import { Timezone } from 'App/mstore/types/sessionSettings';
import { LAST_24_HOURS, LAST_30_DAYS, LAST_7_DAYS } from 'Types/app/period';
import { CUSTOM_RANGE } from '@/dateRange';
export function getDateFromString(date: string, format = 'yyyy-MM-dd HH:mm:ss:SSS'): string {
return DateTime.fromISO(date).toFormat(format);
@ -191,3 +193,35 @@ export const countDaysFrom = (timestamp: number): number => {
const d = new Date();
return Math.round(Math.abs(d.getTime() - date.toJSDate().getTime()) / (1000 * 3600 * 24));
}
/**
 * Resolve a named date range to concrete UTC timestamps in milliseconds.
 * CUSTOM_RANGE requires both explicit bounds; every other range is counted
 * backwards from "now", with unknown names falling back to the last 24h.
 */
export const getDateRangeUTC = (rangeName: string, customStartDate?: number, customEndDate?: number): {
  startDate: number;
  endDate: number
} => {
  const DAY_MS = 24 * 60 * 60 * 1000;
  let endDate = Date.now();
  let startDate: number;
  switch (rangeName) {
    case LAST_7_DAYS:
      startDate = endDate - 7 * DAY_MS;
      break;
    case LAST_30_DAYS:
      startDate = endDate - 30 * DAY_MS;
      break;
    case CUSTOM_RANGE:
      if (!customStartDate || !customEndDate) {
        throw new Error('Start date and end date must be provided for CUSTOM_RANGE.');
      }
      startDate = customStartDate;
      endDate = customEndDate;
      break;
    case LAST_24_HOURS:
    default:
      startDate = endDate - DAY_MS;
  }
  return { startDate, endDate };
}

View file

@ -6,46 +6,59 @@ import Search from '@/mstore/types/search';
import { getFilterFromJson } from 'Types/filter/newFilter';
interface Props {
onBeforeLoad?: () => Promise<any>;
appliedFilter: any;
onBeforeLoad?: () => Promise<void>;
appliedFilter: Record<string, any>;
loading: boolean;
}
const useSessionSearchQueryHandler = (props: Props) => {
const useSessionSearchQueryHandler = ({ onBeforeLoad, appliedFilter, loading }: Props) => {
const { searchStore } = useStore();
const [beforeHookLoaded, setBeforeHookLoaded] = useState(!props.onBeforeLoad);
const { appliedFilter, loading } = props;
const [beforeHookLoaded, setBeforeHookLoaded] = useState(!onBeforeLoad);
const history = useHistory();
// Apply filter from the query string when the component mounts
useEffect(() => {
const applyFilterFromQuery = async () => {
if (!loading && !searchStore.urlParsed) {
if (props.onBeforeLoad) {
await props.onBeforeLoad();
setBeforeHookLoaded(true);
}
try {
if (onBeforeLoad) {
await onBeforeLoad();
setBeforeHookLoaded(true);
}
const converter = JsonUrlConverter.urlParamsToJson(history.location.search);
const json: any = getFilterFromJson(converter.toJSON());
const filter = new Search(json);
searchStore.applyFilter(filter, true);
searchStore.setUrlParsed()
const converter = JsonUrlConverter.urlParamsToJson(history.location.search);
const json = getFilterFromJson(converter.toJSON());
const filter = new Search(json);
searchStore.applyFilter(filter, true);
searchStore.setUrlParsed();
} catch (error) {
console.error('Error applying filter from query:', error);
}
}
};
void applyFilterFromQuery();
}, [loading]);
}, [loading, onBeforeLoad, searchStore, history.location.search]);
// Update the URL whenever the appliedFilter changes
useEffect(() => {
const generateUrlQuery = () => {
const updateUrlWithFilter = () => {
if (!loading && beforeHookLoaded) {
const converter = JsonUrlConverter.jsonToUrlParams(appliedFilter);
history.replace({ search: converter });
const query = JsonUrlConverter.jsonToUrlParams(appliedFilter);
history.replace({ search: query });
}
};
generateUrlQuery();
}, [appliedFilter, loading, beforeHookLoaded]);
updateUrlWithFilter();
}, [appliedFilter, loading, beforeHookLoaded, history]);
// Ensure the URL syncs on remount if already parsed
useEffect(() => {
if (searchStore.urlParsed) {
const query = JsonUrlConverter.jsonToUrlParams(appliedFilter);
history.replace({ search: query });
}
}, [appliedFilter, searchStore.urlParsed, history]);
return null;
};

View file

@ -1,8 +1,7 @@
import Filter from 'Types/filter';
import { FilterKey } from 'Types/filter/filterType';
import { filtersMap } from 'Types/filter/newFilter';
import { makeAutoObservable } from 'mobx';
import Search from 'App/mstore/types/search';
import { aiService } from 'App/services';
export default class AiFiltersStore {
@ -99,7 +98,7 @@ export default class AiFiltersStore {
this.isLoading = true;
try {
const r = await aiService.getSearchFilters(query);
const filterObj = Filter({
const filterObj = new Search({
filters: r.filters.map((f: Record<string, any>) => {
if (f.key === 'fetch') {
return mapFetch(f);

View file

@ -20,7 +20,7 @@ export default class FilterStore {
}
setTopValues = (key: string, values: TopValue[]) => {
this.topValues[key] = values.filter((value) => value !== null && value.value !== '');
this.topValues[key] = values?.filter((value) => value !== null && value.value !== '');
};
fetchTopValues = async (key: string, source?: string) => {

View file

@ -162,15 +162,14 @@ class SearchStore {
});
}
updateCurrentPage(page: number) {
updateCurrentPage(page: number, force = false) {
this.currentPage = page;
void this.fetchSessions();
void this.fetchSessions(force);
}
setActiveTab(tab: string) {
runInAction(() => {
this.activeTab = TAB_MAP[tab];
this.currentPage = 1;
});
}
@ -229,12 +228,13 @@ class SearchStore {
if (this.latestRequestTime) {
const period = Period({ rangeName: CUSTOM_RANGE, start: this.latestRequestTime, end: Date.now() });
const newTimestamps: any = period.toJSON();
filter.startTimestamp = newTimestamps.startDate;
filter.endTimestamp = newTimestamps.endDate;
filter.startDate = newTimestamps.startDate;
filter.endDate = newTimestamps.endDate;
}
searchService.checkLatestSessions(filter).then((response: any) => {
this.latestList = response;
this.latestRequestTime = Date.now();
runInAction(() => {
this.latestList = List(response);
});
});
}
@ -264,8 +264,10 @@ class SearchStore {
});
}
this.currentPage = 1;
if (filter.value && filter.value[0] && filter.value[0] !== '') {
this.fetchSessions();
void this.fetchSessions();
}
}
@ -336,6 +338,9 @@ class SearchStore {
filter.filters = filter.filters.concat(tagFilter);
}
this.latestRequestTime = Date.now();
this.latestList = List();
await sessionStore.fetchSessions({
...filter,
page: this.currentPage,

View file

@ -170,7 +170,7 @@ export default class SessionStore {
}
const nextEntryNum =
keys.length > 0
? Math.max(...keys.map((key) => this.prefetchedMobUrls[key].entryNum || 0)) + 1
? Math.max(...keys.map((key) => this.prefetchedMobUrls[key]?.entryNum || 0)) + 1
: 0;
this.prefetchedMobUrls[sessionId] = {
data: fileData,

View file

@ -1,7 +1,8 @@
import { DATE_RANGE_VALUES, CUSTOM_RANGE, getDateRangeFromValue } from 'App/dateRange';
import Filter, { checkFilterValue, IFilter } from 'App/mstore/types/filter';
import { CUSTOM_RANGE, DATE_RANGE_VALUES, getDateRangeFromValue } from 'App/dateRange';
import Filter, { IFilter } from 'App/mstore/types/filter';
import FilterItem from 'App/mstore/types/filterItem';
import { action, makeAutoObservable, observable } from 'mobx';
import { makeAutoObservable, observable } from 'mobx';
import { LAST_24_HOURS, LAST_30_DAYS, LAST_7_DAYS } from 'Types/app/period';
// @ts-ignore
const rangeValue = DATE_RANGE_VALUES.LAST_24_HOURS;
@ -69,7 +70,7 @@ export default class Search {
constructor(initialData?: Partial<ISearch>) {
makeAutoObservable(this, {
filters: observable,
filters: observable
});
Object.assign(this, {
name: '',
@ -142,11 +143,48 @@ export default class Search {
return new FilterItem(filter).toJson();
});
const { startDate, endDate } = this.getDateRange(js.rangeValue, js.startDate, js.endDate);
js.startDate = startDate;
js.endDate = endDate;
delete js.createdAt;
delete js.key;
return js;
}
/**
 * Resolve a named range to { startDate, endDate } in ms since epoch.
 * CUSTOM_RANGE requires both explicit bounds; other ranges count back
 * from the current time, defaulting to the last 24 hours.
 */
private getDateRange(rangeName: string, customStartDate: number, customEndDate: number): {
  startDate: number;
  endDate: number
} {
  const dayMs = 24 * 60 * 60 * 1000;
  let endDate = Date.now();
  let startDate: number;
  switch (rangeName) {
    case LAST_7_DAYS:
      startDate = endDate - 7 * dayMs;
      break;
    case LAST_30_DAYS:
      startDate = endDate - 30 * dayMs;
      break;
    case CUSTOM_RANGE:
      if (!customStartDate || !customEndDate) {
        throw new Error('Start date and end date must be provided for CUSTOM_RANGE.');
      }
      startDate = customStartDate;
      endDate = customEndDate;
      break;
    case LAST_24_HOURS:
    default:
      startDate = endDate - dayMs;
  }
  return { startDate, endDate };
}
fromJS({ eventsOrder, filters, events, custom, ...filterData }: any) {
let startDate, endDate;
const rValue = filterData.rangeValue || rangeValue;
@ -176,3 +214,4 @@ export default class Search {
});
}
}

View file

@ -1,6 +1,8 @@
import Period, { CUSTOM_RANGE } from 'Types/app/period';
import { filtersMap } from 'Types/filter/newFilter';
import Period, { CUSTOM_RANGE, LAST_24_HOURS } from 'Types/app/period';
const DEFAULT_SORT = 'startTs';
const DEFAULT_ORDER = 'desc';
const DEFAULT_EVENTS_ORDER = 'then';
class Filter {
key: string;
@ -25,24 +27,28 @@ class Filter {
}
}
class InputJson {
export class InputJson {
filters: Filter[];
rangeValue: string;
startDate: number;
endDate: number;
startDate?: number;
endDate?: number;
sort: string;
order: string;
eventsOrder: string;
constructor(filters: Filter[], rangeValue: string, startDate: number, endDate: number, sort: string, order: string, eventsOrder: string) {
constructor(
filters: Filter[],
rangeValue: string,
sort: string,
order: string,
eventsOrder: string,
startDate?: string | number,
endDate?: string | number
) {
this.filters = filters;
// .map((f: any) => {
// const subFilters = f.filters ? f.filters.map((sf: any) => new Filter(sf.key, sf.operator, sf.value, sf.filters)) : undefined;
// return new Filter(f.key, f.operator, f.value, subFilters);
// });
this.rangeValue = rangeValue;
this.startDate = startDate;
this.endDate = endDate;
this.startDate = startDate ? +startDate : undefined;
this.endDate = endDate ? +endDate : undefined;
this.sort = sort;
this.order = order;
this.eventsOrder = eventsOrder;
@ -50,17 +56,28 @@ class InputJson {
toJSON() {
return {
filters: this.filters.map(f => f.toJSON()),
filters: this.filters.map((f) => f.toJSON()),
rangeValue: this.rangeValue,
startDate: this.startDate,
endDate: this.endDate,
startDate: this.startDate ?? null,
endDate: this.endDate ?? null,
sort: this.sort,
order: this.order,
eventsOrder: this.eventsOrder
};
}
}
fromJSON(json: Record<string, any>): InputJson {
return new InputJson(
json.filters.map((f: any) => new Filter(f.key, f.operator, f.value, f.filters)),
json.rangeValue,
json.sort,
json.order,
json.eventsOrder,
json.startDate,
json.endDate
);
}
}
export class JsonUrlConverter {
static keyMap = {
@ -76,35 +93,46 @@ export class JsonUrlConverter {
filters: 'f'
};
static getDateRangeValues(rangeValue: string, startDate: number | undefined, endDate: number | undefined): [number, number] {
if (rangeValue === 'CUSTOM_RANGE') {
return [startDate!, endDate!];
static getDateRangeValues(
rangeValue: string,
startDate: string | null,
endDate: string | null
): [string, string] {
if (rangeValue === CUSTOM_RANGE) {
return [startDate || '', endDate || ''];
}
const period = Period({ rangeName: rangeValue });
const period: any = Period({ rangeName: rangeValue });
return [period.start, period.end];
}
static jsonToUrlParams(json: InputJson): string {
static jsonToUrlParams(json: Record<string, any>): string {
const params = new URLSearchParams();
const addFilterParams = (filter: Filter, prefix: string) => {
params.append(`${prefix}${this.keyMap.key}`, filter.key);
params.append(`${prefix}${this.keyMap.operator}`, filter.operator);
if (filter.value) {
filter.value.forEach((v, i) => params.append(`${prefix}${this.keyMap.value}[${i}]`, v || ''));
}
if (filter.filters) {
filter.filters.forEach((f, i) => addFilterParams(f, `${prefix}${this.keyMap.filters}[${i}].`));
}
filter.value?.forEach((v, i) =>
params.append(`${prefix}${this.keyMap.value}[${i}]`, v || '')
);
filter.filters?.forEach((f, i) =>
addFilterParams(f, `${prefix}${this.keyMap.filters}[${i}].`)
);
};
json.filters.forEach((filter, index) => addFilterParams(filter, `${this.keyMap.filters}[${index}].`));
const rangeValues = this.getDateRangeValues(json.rangeValue, json.startDate, json.endDate);
json.filters.forEach((filter: any, index: number) =>
addFilterParams(filter, `${this.keyMap.filters}[${index}].`)
);
params.append(this.keyMap.rangeValue, json.rangeValue);
params.append(this.keyMap.startDate, rangeValues[0].toString());
params.append(this.keyMap.endDate, rangeValues[1].toString());
if (json.rangeValue === CUSTOM_RANGE) {
const rangeValues = this.getDateRangeValues(
json.rangeValue,
json.startDate?.toString() || null,
json.endDate?.toString() || null
);
params.append(this.keyMap.startDate, rangeValues[0]);
params.append(this.keyMap.endDate, rangeValues[1]);
}
params.append(this.keyMap.sort, json.sort);
params.append(this.keyMap.order, json.order);
params.append(this.keyMap.eventsOrder, json.eventsOrder);
@ -130,7 +158,7 @@ export class JsonUrlConverter {
filters.push(getFilterParams(`${prefix}${this.keyMap.filters}[${index}].`));
index++;
}
return new Filter(key, operator, value.length ? value : '', filters.length ? filters : []);
return new Filter(key, operator, value.length ? value : [], filters.length ? filters : []);
};
const filters: Filter[] = [];
@ -140,23 +168,22 @@ export class JsonUrlConverter {
index++;
}
const rangeValue = params.get(this.keyMap.rangeValue) || 'LAST_24_HOURS';
const rangeValue = params.get(this.keyMap.rangeValue) || LAST_24_HOURS;
const rangeValues = this.getDateRangeValues(rangeValue, params.get(this.keyMap.startDate), params.get(this.keyMap.endDate));
const startDate = rangeValues[0];
const endDate = rangeValues[1];
return new InputJson(
filters,
rangeValue,
startDate,
endDate,
params.get(this.keyMap.sort) || 'startTs',
params.get(this.keyMap.order) || 'desc',
params.get(this.keyMap.eventsOrder) || 'then'
params.get(this.keyMap.sort) || DEFAULT_SORT,
params.get(this.keyMap.order) || DEFAULT_ORDER,
params.get(this.keyMap.eventsOrder) || DEFAULT_EVENTS_ORDER,
rangeValues[0],
rangeValues[1]
);
}
}
// Example usage
// const urlParams = '?f[0].k=click&f[0].op=on&f[0].v[0]=Refresh&f[1].k=fetch&f[1].op=is&f[1].v[0]=&f[1].f[0].k=fetchUrl&f[1].f[0].op=is&f[1].f[0].v[0]=/g/collect&f[1].f[1].k=fetchStatusCode&f[1].f[1].op=>=&f[1].f[1].v[0]=400&f[1].f[2].k=fetchMethod&f[1].f[2].op=is&f[1].f[2].v[0]=&f[1].f[3].k=fetchDuration&f[1].f[3].op==&f[1].f[3].v[0]=&f[1].f[4].k=fetchRequestBody&f[1].f[4].op=is&f[1].f[4].v[0]=&f[1].f[5].k=fetchResponseBody&f[1].f[5].op=is&f[1].f[5].v[0]=&rv=LAST_24_HOURS&sd=1731343412555&ed=1731429812555&s=startTs&o=desc&st=false&eo=then';
// const parsedJson = JsonUrlConverter.urlParamsToJson(urlParams);

View file

@ -1,6 +1,6 @@
{
"name": "@openreplay/network-proxy",
"version": "1.0.4",
"version": "1.0.5",
"description": "this library helps us to create proxy objects for fetch, XHR and beacons for proper request tracking.",
"main": "dist/index.js",
"module": "dist/index.js",
@ -8,6 +8,7 @@
"files": [
"dist"
],
"type": "module",
"scripts": {
"build": "tsc",
"test": "vitest",

View file

@ -0,0 +1,116 @@
{{- if or .Values.postgresql.oldPostgresqlPassword .Values.clickhouse.oldPassword }}
---
apiVersion: batch/v1
kind: Job
metadata:
name: update-db-passwords
namespace: "{{ .Release.Namespace }}"
annotations:
"helm.sh/hook": post-upgrade
"helm.sh/hook-weight": "-6" # Higher precedence, so the config map will get created first.
spec:
template:
spec:
containers:
{{- if .Values.postgresql.oldPostgresqlPassword }}
- name: update-postgres-password
image: postgres:13
env:
- name: PGUSER
value: {{.Values.postgresql.postgresqlUser}}
- name: PGPASSWORD_NEW
value: {{.Values.postgresql.postgresqlPassword}} # current password
- name: PGPASSWORD_OLD
value: {{.Values.postgresql.oldPostgresqlPassword}} # old password
- name: PGHOST
value: {{.Values.postgresql.postgresqlHost}}
- name: PGPORT
value: "{{.Values.postgresql.postgresqlPort}}"
command: ["/bin/bash", "-c", "--"]
args:
- |
# Try to login with the current password
if PGPASSWORD=$PGPASSWORD_NEW psql -h $PGHOST -p $PGPORT -U $PGUSER -d postgres -c '\q'; then
echo "Successfully logged in with current password. No update needed."
exit 0
else
echo "Failed to login with current password, trying with old password."
# Try to login with the old password
if PGPASSWORD=$PGPASSWORD_OLD psql -h $PGHOST -p $PGPORT -U $PGUSER -d postgres -c '\q'; then
echo "Successfully logged in with old password. Updating password to the new one."
# Update the password to the new one
PGPASSWORD=$PGPASSWORD_OLD psql -h $PGHOST -p $PGPORT -U $PGUSER -d postgres -c "ALTER USER $PGUSER WITH PASSWORD '$PGPASSWORD_NEW';"
if [ $? -eq 0 ]; then
echo "Password updated successfully."
exit 0
else
echo "Failed to update the password."
exit 1
fi
else
echo "Failed to login with both current and old passwords."
exit 1
fi
fi
{{- end }}
{{- if .Values.clickhouse.oldPasswordnever }} # This will never trigger, as there is no clickhouse server right now.
- name: update-clickhouse-password
image: clickhouse/clickhouse-server:22.8
env:
- name: CLICKHOUSE_USER
value: {{.Values.clickhouse.username}}
- name: CLICKHOUSE_PASSWORD
value: {{.Values.clickhouse.password}} # current password
- name: CLICKHOUSE_PASSWORD_OLD
value: {{.Values.clickhouse.oldPassword}} # old password
- name: CLICKHOUSE_HOST
value: clickhouse-openreplay-clickhouse.db.svc.cluster.local
- name: CLICKHOUSE_PORT
value: "9000"
command: ["/bin/bash", "-c", "--"]
args:
- |
# Function to check if the Clickhouse server is reachable
is_clickhouse_reachable() {
[ "$(curl -s -o /dev/null -w '%{http_code}' http://$CLICKHOUSE_HOST:$CLICKHOUSE_PORT/ping)" -eq 200 ]
}
# Check if Clickhouse server is reachable
if is_clickhouse_reachable; then
echo "Clickhouse server is reachable, attempting to login with the current password."
# Try to login with the current password
if echo 'SELECT 1' | clickhouse-client --host $CLICKHOUSE_HOST --port $CLICKHOUSE_PORT --user $CLICKHOUSE_USER --password $CLICKHOUSE_PASSWORD; then
echo "Successfully logged in with current password. No update needed."
exit 0
else
echo "Failed to login with current password, trying with old password."
# Try to login with the old password
if echo 'SELECT 1' | clickhouse-client --host $CLICKHOUSE_HOST --port $CLICKHOUSE_PORT --user $CLICKHOUSE_USER --password $CLICKHOUSE_PASSWORD_OLD; then
echo "Successfully logged in with old password. Updating password to the new one."
# Generate a new random password and update it
new_password=$(openssl rand -hex 20)
clickhouse-client --host $CLICKHOUSE_HOST --port $CLICKHOUSE_PORT --user $CLICKHOUSE_USER --password $CLICKHOUSE_PASSWORD_OLD --query "ALTER USER $CLICKHOUSE_USER IDENTIFIED WITH PLAINTEXT_PASSWORD BY '$new_password';"
if [ $? -eq 0 ]; then
echo "Password updated successfully."
exit 0
else
echo "Failed to update the password."
exit 1
fi
else
echo "Failed to login with both current and old passwords."
exit 1
fi
fi
else
echo "Clickhouse server is not reachable."
exit 1
fi
{{- end}}
restartPolicy: Never
backoffLimit: 3
{{- end }}

View file

@ -119,7 +119,7 @@ function install_openreplay_actions() {
sudo rm -rf $openreplay_code_dir
fi
sudo cp -rfb ./vars.yaml $openreplay_home_dir
sudo cp -rf "$(cd ../.. && pwd)" $openreplay_code_dir
sudo cp -rf "$(cd ../.. && pwd)" $openreplay_home_dir
}
function main() {

View file

@ -203,6 +203,7 @@ function status() {
return
}
# Create OR version patch with git sha
function patch_version() {
# Patching config version for console
version=$(/var/lib/openreplay/yq '.fromVersion' vars.yaml)-$(sudo git rev-parse --short HEAD)
@ -385,7 +386,7 @@ function upgrade() {
time_now=$(date +%m-%d-%Y-%I%M%S)
# Creating backup dir of current installation
[[ -d "$OR_DIR/openreplay" ]] && sudo mv "$OR_DIR/openreplay" "$OR_DIR/openreplay_${or_version//\"/}_${time_now}"
[[ -d "$OR_DIR/openreplay" ]] && sudo cp -rf "$OR_DIR/openreplay" "$OR_DIR/openreplay_${or_version//\"/}_${time_now}"
clone_repo
err_cd openreplay/scripts/helmcharts
@ -406,7 +407,8 @@ function upgrade() {
sudo mv ./openreplay-cli /bin/openreplay
sudo chmod +x /bin/openreplay
sudo mv ./vars.yaml "$OR_DIR"
sudo cp -rf ../../../openreplay "$OR_DIR/"
sudo rm -rf "$OR_DIR/openreplay" || true
sudo cp -rf "${tmp_dir}/openreplay" "$OR_DIR/"
log info "Configuration file is saved in /var/lib/openreplay/vars.yaml"
log info "Run ${BWHITE}openreplay -h${GREEN} to see the cli information to manage OpenReplay."

View file

@ -5,9 +5,25 @@ cd $(dirname $0)
is_migrate=$1
# Check if the openreplay version is set.
# This will take precedence over the .Values.fromVersion variable
# Because it's created programmatically by the installation.
if [[ -n $OPENREPLAY_VERSION ]]; then
is_migrate=true
PREVIOUS_APP_VERSION=$OPENREPLAY_VERSION
echo "$OPENREPLAY_VERSION set"
fi
if [[ $FORCE_MIGRATION == "true" ]]; then
is_migrate=true
fi
# Passed from env
# PREVIOUS_APP_VERSION
# CHART_APP_VERSION
# Converting alphanumeric to number.
PREVIOUS_APP_VERSION=$(echo $PREVIOUS_APP_VERSION | cut -d "v" -f2)
CHART_APP_VERSION=$(echo $CHART_APP_VERSION | cut -d "v" -f2)
function migration() {
ls -la /opt/openreplay/openreplay

View file

@ -2,9 +2,15 @@
Don't have to trigger migration if there is no version change
Don't have to trigger migration if skipMigration is set
Have to trigger migration if forceMigration is set
versionChange is true when:
Release.IsUpgrade is false.
Or .Values.deployment.argo is set.
Or Release.IsUpgrade is true and .Values.fromVersion is not equal to .Chart.AppVersion.
*/}}
{{- $versionChange := and (eq .Values.fromVersion .Chart.AppVersion) (.Release.IsUpgrade) }}
{{- if or (not (or .Values.skipMigration $versionChange)) .Values.forceMigration }}
{{- $versionChange := (or (not .Release.IsUpgrade) .Values.deployment.argo (and .Release.IsUpgrade (not (eq .Values.fromVersion .Chart.AppVersion)))) }}
{{- if or .Values.forceMigration (and (not .Values.skipMigration) $versionChange) }}
---
apiVersion: v1
kind: ConfigMap
@ -91,6 +97,7 @@ spec:
check_github
git clone {{ .Values.global.dbMigrationUpstreamRepoURL | default "https://github.com/openreplay/openreplay" }} .
ls /opt/openreplay/openreplay
git checkout {{ default .Chart.AppVersion .Values.dbMigrationUpstreamBranch }} || exit 10
git log -1

View file

@ -5,6 +5,11 @@ migrationJob:
migration:
env: {}
deployment:
argo: false
forceMigration: false
skipMigration: false
redis: &redis
tls:
enabled: false

View file

@ -1,7 +1,7 @@
{
"name": "@openreplay/tracker-assist",
"description": "Tracker plugin for screen assistance through the WebRTC",
"version": "10.0.1",
"version": "10.0.2",
"keywords": [
"WebRTC",
"assistance",
@ -34,7 +34,7 @@
"socket.io-client": "^4.8.1"
},
"peerDependencies": {
"@openreplay/tracker": "^14.0.14"
"@openreplay/tracker": ">=14.0.14"
},
"devDependencies": {
"@openreplay/tracker": "file:../tracker",

View file

@ -1 +1 @@
export const pkgVersion = "10.0.1";
export const pkgVersion = "10.0.2";

View file

@ -1,7 +1,7 @@
{
"name": "@openreplay/tracker",
"description": "The OpenReplay tracker main package",
"version": "15.0.0",
"version": "15.0.1",
"keywords": [
"logging",
"replay"
@ -26,16 +26,19 @@
},
"files": [
"dist/lib/**/*",
"dist/cjs/**/*"
"dist/cjs/**/*",
"dist/types/**/*"
],
"main": "./dist/cjs/index.js",
"module": "./dist/lib/index.js",
"types": "./dist/lib/main/index.d.ts",
"scripts": {
"lint": "eslint src --ext .ts,.js --fix --quiet",
"clean": "rm -Rf build && rm -Rf lib && rm -Rf cjs",
"clean": "rm -Rf build && rm -Rf dist",
"build:common": "tsc -b src/common",
"build": "yarn run clean && rollup --config rollup.config.js",
"compile": "tsc --project src/main/tsconfig.json",
"create-types": "mkdir dist/lib/ dist/cjs && cp -r dist/types/* dist/lib/ && cp -r dist/types/* dist/cjs/",
"build": "yarn run clean && yarn compile && yarn create-types && rollup --config rollup.config.js",
"lint-front": "lint-staged",
"test": "jest --coverage=false",
"test:ci": "jest --coverage=true",
@ -46,6 +49,7 @@
"@babel/core": "^7.26.0",
"@jest/globals": "^29.7.0",
"@rollup/plugin-babel": "^6.0.4",
"@rollup/plugin-commonjs": "^28.0.1",
"@rollup/plugin-node-resolve": "^15.3.0",
"@rollup/plugin-replace": "^6.0.1",
"@rollup/plugin-terser": "0.4.4",
@ -67,7 +71,7 @@
},
"dependencies": {
"@medv/finder": "^3.2.0",
"@openreplay/network-proxy": "^1.0.4",
"@openreplay/network-proxy": "^1.0.5",
"error-stack-parser": "^2.0.6",
"error-stack-parser-es": "^0.1.5",
"fflate": "^0.8.2",

View file

@ -3,6 +3,7 @@ import typescript from '@rollup/plugin-typescript'
import terser from '@rollup/plugin-terser'
import replace from '@rollup/plugin-replace'
import { rollup } from 'rollup'
import commonjs from '@rollup/plugin-commonjs';
import { createRequire } from 'module'
const require = createRequire(import.meta.url)
const packageConfig = require('./package.json')
@ -12,7 +13,6 @@ export default async () => {
const commonPlugins = [
resolve(),
// terser(),
replace({
preventAssignment: true,
values: {
@ -23,7 +23,7 @@ export default async () => {
]
return [
{
input: 'src/main/index.ts',
input: 'build/main/index.js',
output: {
dir: 'dist/lib',
format: 'es',
@ -32,13 +32,10 @@ export default async () => {
},
plugins: [
...commonPlugins,
typescript({
tsconfig: 'src/main/tsconfig.json',
}),
],
},
{
input: 'src/main/index.ts',
input: 'build/main/index.js',
output: {
dir: 'dist/cjs',
format: 'cjs',
@ -47,9 +44,7 @@ export default async () => {
},
plugins: [
...commonPlugins,
typescript({
tsconfig: 'src/main/tsconfig-cjs.json',
}),
commonjs(),
],
},
]

View file

@ -8,7 +8,7 @@ function processMapInBatches(
const iterator = map.entries()
function processNextBatch() {
const batch = []
const batch: any[] = []
let result = iterator.next()
while (!result.done && batch.length < batchSize) {

Some files were not shown because too many files have changed in this diff Show more