dev: pulled changes, resolved conflicts

Shekar Siri 2023-01-05 14:35:35 +01:00
commit 275554b625
45 changed files with 350 additions and 285 deletions

View file

@ -67,7 +67,10 @@ jobs:
} && {
echo "Skipping Security Checks"
}
docker push $DOCKER_REPO/$image:$IMAGE_TAG
images=("chalice" "alerts")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
@ -107,7 +110,7 @@ jobs:
cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set skipMigration=true --no-hooks
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing the -ee flag, because helm will add it.
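The push step above iterates with `for image in ${images[*]}`, which word-splits the unquoted expansion on IFS. That works for these two names, but the quoted `"${images[@]}"` form expands each element as exactly one word and is the robust spelling. A minimal sketch, assuming `DOCKER_REPO` and `IMAGE_TAG` are exported as in the workflow:

```bash
#!/usr/bin/env bash
# Quoted [@] keeps each array element intact, even if a name ever
# contains spaces or glob characters.
images=("chalice" "alerts")
for image in "${images[@]}"; do
    docker push "$DOCKER_REPO/$image:$IMAGE_TAG"
done
```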

View file

@ -66,7 +66,10 @@ jobs:
} && {
echo "Skipping Security Checks"
}
docker push $DOCKER_REPO/$image:$IMAGE_TAG
images=("chalice" "alerts")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
@ -131,4 +134,4 @@ jobs:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#

View file

@ -43,7 +43,7 @@ jobs:
PUSH_IMAGE=1 bash build.sh
- name: Deploy to kubernetes
run: |
cd scripts/helm/
cd scripts/helmcharts/
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml

View file

@ -71,12 +71,10 @@ jobs:
case ${build_param} in
false)
{
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
done
} | uniq > /tmp/images_to_build.txt
} | awk '!seen[$0]++' > /tmp/images_to_build.txt
;;
all)
ls backend/cmd > /tmp/images_to_build.txt
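Both build workflows replace the trailing `uniq` with `awk '!seen[$0]++'`. The concatenated output of the two `git diff` branches is unsorted, and `uniq` only folds *adjacent* repeats, so a service touched by both a direct change and a package change could land in `/tmp/images_to_build.txt` twice. The awk filter de-duplicates globally while preserving first-seen order. A self-contained comparison:

```bash
# uniq misses non-adjacent repeats on an unsorted stream:
printf 'http\nsink\nhttp\nsink\n' | uniq
# -> http sink http sink (unchanged)

# awk prints a line only the first time it is seen:
printf 'http\nsink\nhttp\nsink\n' | awk '!seen[$0]++'
# -> http sink
```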
@ -160,17 +158,16 @@ jobs:
# Deploy command
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
#- name: Alert slack
# if: ${{ failure() }}
# uses: rtCamp/action-slack-notify@v2
# env:
# SLACK_CHANNEL: ee
# SLACK_TITLE: "Failed ${{ github.workflow }}"
# SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
# SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
# SLACK_USERNAME: "OR Bot"
# SLACK_MESSAGE: 'Build failed :bomb:'
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
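The deploy step now renders the chart with `helm template` and pipes the manifests straight into `kubectl apply`, instead of letting `helm upgrade --install` manage a release. A hedged sketch of previewing such a rollout before applying, trimmed to the essential flags used above (`kubectl diff` exits non-zero when the cluster would change, hence the `|| true`):

```bash
cd scripts/helmcharts/
# Dry-run: render locally and diff against the live cluster first.
helm template openreplay -n app openreplay -f vars.yaml \
    --set skipMigration=true --no-hooks \
    | kubectl diff -n app -f - || true
# Apply for real, as the workflow does.
helm template openreplay -n app openreplay -f vars.yaml \
    --set skipMigration=true --no-hooks \
    | kubectl apply -n app -f -
```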

View file

@ -71,12 +71,10 @@ jobs:
case ${build_param} in
false)
{
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
done
} | uniq > /tmp/images_to_build.txt
} | awk '!seen[$0]++' > /tmp/images_to_build.txt
;;
all)
ls backend/cmd > /tmp/images_to_build.txt

api/.trivyignore Normal file
View file

@ -0,0 +1,3 @@
# Accept the risk until the fix reaches distro packages:
# python setuptools was recently fixed upstream, but the patched version is not yet available in distros.
CVE-2022-40897 exp:2023-02-01
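Trivy honours `exp:` dates in `.trivyignore`: CVE-2022-40897 stays suppressed only until 2023-02-01 and resurfaces in scans automatically afterwards. A hedged sketch of how such a file is typically consumed; the image reference is illustrative, not taken from this workflow:

```bash
# Scan a built image, honouring the ignore file; any remaining
# HIGH/CRITICAL finding fails the step via --exit-code 1.
trivy image \
    --ignorefile api/.trivyignore \
    --severity HIGH,CRITICAL \
    --exit-code 1 \
    "$DOCKER_REPO/chalice:$IMAGE_TAG"
```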

View file

@ -1,6 +1,9 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache build-base tini
ARG envarg
# Add Tini
@ -9,7 +12,8 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
APP_NAME=chalice \
LISTEN_PORT=8000 \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg}
ENTERPRISE_BUILD=${envarg} \
GIT_SHA=$GIT_SHA
WORKDIR /work
COPY requirements.txt ./requirements.txt
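The new `GIT_SHA` build argument is surfaced twice: as an image label, inspectable without starting a container, and as an env var, readable from inside one. A minimal round-trip sketch with a hypothetical local tag:

```bash
# Bake the short commit hash into the image...
docker build --build-arg GIT_SHA="$(git rev-parse --short HEAD)" \
    -t local/chalice:dev .
# ...then read it back from the label or from a running container.
docker inspect --format '{{ index .Config.Labels "GIT_SHA" }}' local/chalice:dev
docker run --rm --entrypoint sh local/chalice:dev -c 'echo "$GIT_SHA"'
```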

View file

@ -1,6 +1,9 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache build-base tini
ARG envarg
ENV APP_NAME=alerts \
@ -8,6 +11,7 @@ ENV APP_NAME=alerts \
PG_MAXCONN=10 \
LISTEN_PORT=8000 \
PRIVATE_ENDPOINTS=true \
GIT_SHA=$GIT_SHA \
ENTERPRISE_BUILD=${envarg}
WORKDIR /work

View file

@ -16,7 +16,8 @@ exit_err() {
}
environment=$1
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
envarg="default-foss"
check_prereq() {
which docker || {
@ -41,14 +42,17 @@ function build_api(){
tag="ee-"
}
mv Dockerfile.dockerignore .dockerignore
docker build -f ./Dockerfile --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/chalice:${git_sha1} .
docker build -f ./Dockerfile --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/chalice:${image_tag} .
cd ../api
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
docker push ${DOCKER_REPO:-'local'}/chalice:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/chalice:${image_tag} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/chalice:${image_tag}
}
echo "api docker build completed"
}
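With the `$` restored (see the fix above), `image_tag=${IMAGE_TAG:-$git_sha}` means: use `IMAGE_TAG` from the environment when it is set and non-empty, otherwise fall back to the short commit hash. A quick demonstration of the `:-` default expansion:

```bash
git_sha=$(git rev-parse --short HEAD)
unset IMAGE_TAG
echo "${IMAGE_TAG:-$git_sha}"   # prints the short sha, e.g. 275554b
IMAGE_TAG=latest
echo "${IMAGE_TAG:-$git_sha}"   # prints: latest
# Note: ${IMAGE_TAG:-git_sha} (no $) would default to the literal
# string "git_sha", which is why the fallback needs the dollar sign.
```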

View file

@ -7,7 +7,8 @@
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
envarg="default-foss"
check_prereq() {
which docker || {
@ -31,12 +32,12 @@ function build_alerts(){
tag="ee-"
}
mv Dockerfile_alerts.dockerignore .dockerignore
docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/alerts:${image_tag} .
cd ../api
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
docker push ${DOCKER_REPO:-'local'}/alerts:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/alerts:${image_tag} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest
}
echo "completed alerts build"

View file

@ -1,60 +1,72 @@
<!DOCTYPE html>
<html>
<body style="margin: 0; padding: 0; font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
<table width="100%" border="0" style="background-color: #f6f6f6">
<tr>
<td>
<div style="border-radius: 3px; border-radius:4px; overflow: hidden; background-color: #ffffff; max-width: 600px; margin:20px auto;">
<table style="margin:20px auto; border:1px solid transparent; border-collapse:collapse; background-color: #ffffff; max-width:600px"
width="100%">
<!--Main Image-->
<tr>
<td style="padding:10px 30px;">
<center>
<img src="img/logo.png" alt="OpenReplay" width="100%" style="max-width: 120px;">
</center>
</td>
</tr>
<!--Main Title and Overview -->
<body
style="margin: 0; padding: 0; font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
<table width="100%" border="0" style="background-color: #f6f6f6">
<tr>
<td>
<div
style="border-radius: 3px; border-radius:4px; overflow: hidden; background-color: #ffffff; max-width: 600px; margin:20px auto;">
<table
style="margin:20px auto; border:1px solid transparent; border-collapse:collapse; background-color: #ffffff; max-width:600px"
width="100%">
<!--Main Image-->
<tr>
<td style="padding:10px 30px;">
<center>
<img src="img/logo.png" alt="OpenReplay" width="100%" style="max-width: 120px;">
</center>
</td>
</tr>
<tr>
<td style="padding:0 15px;">
<h1 style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #286f6a">
New alert!</h1>
<p style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
%(message)s</p>
<p style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
<a href="%(frontend_url)s/%(project_id)s/metrics">See metrics</a> for more details.</p>
<!--Main Title and Overview -->
</td>
</tr>
<!--Footer-->
<tr>
<td style="padding: 0 15px">
<div style="border-top:1px dotted rgba(0,0,0,0.2); display: block; margin-top: 20px"></div>
<center>
<p style="font-size: 12px; font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
Sent with &#9825; from OpenReplay &copy; 2022 - All rights reserved.<br><br>
<a href="https://openreplay.com" target="_blank"
style="text-decoration: none; color: #6c757d">https://openreplay.com/</a>
<tr>
<td style="padding:0 15px;">
<h1
style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #286f6a">
New alert!</h1>
<p
style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
%(message)s</p>
<p
style="font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
<a href="%(frontend_url)s/%(project_id)s/metrics">See metrics</a> for more details.
</p>
</center>
</td>
</tr>
</table>
</td>
</tr>
</div>
</td>
</tr>
</table>
<!--Footer-->
<tr>
<td style="padding: 0 15px">
<div style="border-top:1px dotted rgba(0,0,0,0.2); display: block; margin-top: 20px">
</div>
<center>
<p
style="font-size: 12px; font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
Sent with &#9825; from OpenReplay &copy;
<script>document.write(`${new Date().getFullYear()}`)</script> - All rights
reserved.<br><br>
<a href="https://openreplay.com" target="_blank"
style="text-decoration: none; color: #6c757d">https://openreplay.com/</a>
</p>
</center>
</td>
</tr>
</table>
</div>
</td>
</tr>
</table>
</body>
</html>
</html>

View file

@ -19,10 +19,14 @@ RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags dynamic open
FROM alpine AS entrypoint
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5
RUN adduser -u 1001 openreplay -D
ENV TZ=UTC \
GIT_SHA=$GIT_SHA \
FS_ULIMIT=1000 \
FS_DIR=/mnt/efs \
MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \

View file

@ -9,7 +9,8 @@
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
set -e
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
ee="false"
check_prereq() {
which docker || {
@ -22,9 +23,12 @@ check_prereq() {
function build_service() {
image="$1"
echo "BUILDING $image"
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --platform linux/amd64 --build-arg SERVICE_NAME=$image .
docker build -t ${DOCKER_REPO:-'local'}/$image:${image_tag} --platform linux/amd64 --build-arg SERVICE_NAME=$image --build-arg GIT_SHA=$git_sha .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
docker push ${DOCKER_REPO:-'local'}/$image:${image_tag}
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/$image:${image_tag}
}
echo "Build completed for $image"
return
@ -51,7 +55,7 @@ function build_api(){
for image in $(ls cmd);
do
build_service $image
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${image_tag}"
done
cd ../backend
rm -rf ../${destination}

View file

@ -44,6 +44,8 @@ type Storage struct {
readingDEVTime syncfloat64.Histogram
archivingDOMTime syncfloat64.Histogram
archivingDEVTime syncfloat64.Histogram
uploadingDOMTime syncfloat64.Histogram
uploadingDEVTime syncfloat64.Histogram
tasks chan *Task
ready chan struct{}
@ -85,6 +87,14 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Stor
if err != nil {
log.Printf("can't create archiving_duration metric: %s", err)
}
uploadingDOMTime, err := metrics.RegisterHistogram("uploading_duration")
if err != nil {
log.Printf("can't create uploading_duration metric: %s", err)
}
uploadingDEVTime, err := metrics.RegisterHistogram("uploading_dt_duration")
if err != nil {
log.Printf("can't create uploading_duration metric: %s", err)
}
newStorage := &Storage{
cfg: cfg,
s3: s3,
@ -96,6 +106,8 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Stor
readingDEVTime: readingDEVTime,
archivingDOMTime: archivingDOMTime,
archivingDEVTime: archivingDEVTime,
uploadingDOMTime: uploadingDOMTime,
uploadingDEVTime: uploadingDEVTime,
tasks: make(chan *Task, 1),
ready: make(chan struct{}),
}
@ -119,13 +131,13 @@ func (s *Storage) Upload(msg *messages.SessionEnd) (err error) {
wg.Add(2)
go func() {
if prepErr := s.prepareSession(filePath, DOM, newTask); prepErr != nil {
err = fmt.Errorf("prepareSession err: %s", prepErr)
err = fmt.Errorf("prepareSession DOM err: %s", prepErr)
}
wg.Done()
}()
go func() {
if prepErr := s.prepareSession(filePath, DEV, newTask); prepErr != nil {
err = fmt.Errorf("prepareSession err: %s", prepErr)
err = fmt.Errorf("prepareSession DEV err: %s", prepErr)
}
wg.Done()
}()
@ -237,33 +249,46 @@ func (s *Storage) compressSession(data []byte) *bytes.Buffer {
func (s *Storage) uploadSession(task *Task) {
wg := &sync.WaitGroup{}
wg.Add(3)
var (
uploadDoms int64 = 0
uploadDome int64 = 0
uploadDev int64 = 0
)
go func() {
if task.doms != nil {
start := time.Now()
if err := s.s3.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", true); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDoms = time.Now().Sub(start).Milliseconds()
}
wg.Done()
}()
go func() {
if task.dome != nil {
start := time.Now()
if err := s.s3.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", true); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDome = time.Now().Sub(start).Milliseconds()
}
wg.Done()
}()
go func() {
if task.dev != nil {
start := time.Now()
if err := s.s3.Upload(task.dev, task.id+string(DEV), "application/octet-stream", true); err != nil {
log.Fatalf("Storage: start upload failed. %s", err)
}
uploadDev = time.Now().Sub(start).Milliseconds()
}
wg.Done()
}()
wg.Wait()
// Record metrics
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200)
defer cancel()
s.uploadingDOMTime.Record(ctx, float64(uploadDoms+uploadDome))
s.uploadingDEVTime.Record(ctx, float64(uploadDev))
s.totalSessions.Add(ctx, 1)
}

View file

@ -2,8 +2,14 @@ package postgres
import (
"bytes"
"context"
"errors"
"fmt"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
"openreplay/backend/pkg/monitoring"
"time"
)
const (
@ -18,13 +24,15 @@ type Bulk interface {
}
type bulkImpl struct {
conn Pool
table string
columns string
template string
setSize int
sizeLimit int
values []interface{}
conn Pool
table string
columns string
template string
setSize int
sizeLimit int
values []interface{}
bulkSize syncfloat64.Histogram
bulkDuration syncfloat64.Histogram
}
func (b *bulkImpl) Append(args ...interface{}) error {
@ -46,6 +54,8 @@ func (b *bulkImpl) Send() error {
}
func (b *bulkImpl) send() error {
start := time.Now()
size := len(b.values) / b.setSize
request := bytes.NewBufferString(insertPrefix + b.table + b.columns + insertValues)
args := make([]interface{}, b.setSize)
for i := 0; i < len(b.values)/b.setSize; i++ {
@ -63,13 +73,19 @@ func (b *bulkImpl) send() error {
if err != nil {
return fmt.Errorf("send bulk err: %s", err)
}
// Save bulk metrics
ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*200)
defer cancel()
b.bulkDuration.Record(ctx, float64(time.Now().Sub(start).Milliseconds()), attribute.String("table", b.table))
b.bulkSize.Record(ctx, float64(size), attribute.String("table", b.table))
return nil
}
func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
switch {
case conn == nil:
return nil, errors.New("db conn is empty")
case metrics == nil:
return nil, errors.New("metrics is empty")
case table == "":
return nil, errors.New("table is empty")
case columns == "":
@ -81,13 +97,23 @@ func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int)
case sizeLimit <= 0:
return nil, errors.New("size limit is wrong")
}
messagesInBulk, err := metrics.RegisterHistogram("messages_in_bulk")
if err != nil {
log.Printf("can't create messages_size metric: %s", err)
}
bulkInsertDuration, err := metrics.RegisterHistogram("bulk_insert_duration")
if err != nil {
log.Printf("can't create messages_size metric: %s", err)
}
return &bulkImpl{
conn: conn,
table: table,
columns: columns,
template: template,
setSize: setSize,
sizeLimit: sizeLimit,
values: make([]interface{}, 0, setSize*sizeLimit),
conn: conn,
table: table,
columns: columns,
template: template,
setSize: setSize,
sizeLimit: sizeLimit,
values: make([]interface{}, 0, setSize*sizeLimit),
bulkSize: messagesInBulk,
bulkDuration: bulkInsertDuration,
}, nil
}

View file

@ -78,7 +78,7 @@ func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics)
if err != nil {
log.Fatalf("can't create new pool wrapper: %s", err)
}
conn.initBulks()
conn.initBulks(metrics)
return conn
}
@ -107,9 +107,9 @@ func (conn *Conn) initMetrics(metrics *monitoring.Metrics) {
}
}
func (conn *Conn) initBulks() {
func (conn *Conn) initBulks(metrics *monitoring.Metrics) {
var err error
conn.autocompletes, err = NewBulk(conn.c,
conn.autocompletes, err = NewBulk(conn.c, metrics,
"autocomplete",
"(value, type, project_id)",
"($%d, $%d, $%d)",
@ -117,7 +117,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create autocomplete bulk: %s", err)
}
conn.requests, err = NewBulk(conn.c,
conn.requests, err = NewBulk(conn.c, metrics,
"events_common.requests",
"(session_id, timestamp, seq_index, url, duration, success)",
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d)",
@ -125,7 +125,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create requests bulk: %s", err)
}
conn.customEvents, err = NewBulk(conn.c,
conn.customEvents, err = NewBulk(conn.c, metrics,
"events_common.customs",
"(session_id, timestamp, seq_index, name, payload)",
"($%d, $%d, $%d, left($%d, 2700), $%d)",
@ -133,7 +133,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create customEvents bulk: %s", err)
}
conn.webPageEvents, err = NewBulk(conn.c,
conn.webPageEvents, err = NewBulk(conn.c, metrics,
"events.pages",
"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
@ -144,7 +144,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webPageEvents bulk: %s", err)
}
conn.webInputEvents, err = NewBulk(conn.c,
conn.webInputEvents, err = NewBulk(conn.c, metrics,
"events.inputs",
"(session_id, message_id, timestamp, value, label)",
"($%d, $%d, $%d, $%d, NULLIF($%d,''))",
@ -152,7 +152,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webPageEvents bulk: %s", err)
}
conn.webGraphQL, err = NewBulk(conn.c,
conn.webGraphQL, err = NewBulk(conn.c, metrics,
"events.graphql",
"(session_id, timestamp, message_id, name, request_body, response_body)",
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d)",
@ -160,7 +160,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webPageEvents bulk: %s", err)
}
conn.webErrors, err = NewBulk(conn.c,
conn.webErrors, err = NewBulk(conn.c, metrics,
"errors",
"(error_id, project_id, source, name, message, payload)",
"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
@ -168,7 +168,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webErrors bulk: %s", err)
}
conn.webErrorEvents, err = NewBulk(conn.c,
conn.webErrorEvents, err = NewBulk(conn.c, metrics,
"events.errors",
"(session_id, message_id, timestamp, error_id)",
"($%d, $%d, $%d, $%d)",
@ -176,7 +176,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webErrorEvents bulk: %s", err)
}
conn.webErrorTags, err = NewBulk(conn.c,
conn.webErrorTags, err = NewBulk(conn.c, metrics,
"public.errors_tags",
"(session_id, message_id, error_id, key, value)",
"($%d, $%d, $%d, $%d, $%d)",
@ -184,7 +184,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webErrorEvents bulk: %s", err)
}
conn.webIssues, err = NewBulk(conn.c,
conn.webIssues, err = NewBulk(conn.c, metrics,
"issues",
"(project_id, issue_id, type, context_string)",
"($%d, $%d, $%d, $%d)",
@ -192,7 +192,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webIssues bulk: %s", err)
}
conn.webIssueEvents, err = NewBulk(conn.c,
conn.webIssueEvents, err = NewBulk(conn.c, metrics,
"events_common.issues",
"(session_id, issue_id, timestamp, seq_index, payload)",
"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
@ -200,7 +200,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webIssueEvents bulk: %s", err)
}
conn.webCustomEvents, err = NewBulk(conn.c,
conn.webCustomEvents, err = NewBulk(conn.c, metrics,
"events_common.customs",
"(session_id, seq_index, timestamp, name, payload, level)",
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d)",
@ -208,7 +208,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webCustomEvents bulk: %s", err)
}
conn.webClickEvents, err = NewBulk(conn.c,
conn.webClickEvents, err = NewBulk(conn.c, metrics,
"events.clicks",
"(session_id, message_id, timestamp, label, selector, url, path)",
"($%d, $%d, $%d, NULLIF($%d, ''), $%d, $%d, $%d)",
@ -216,7 +216,7 @@ func (conn *Conn) initBulks() {
if err != nil {
log.Fatalf("can't create webClickEvents bulk: %s", err)
}
conn.webNetworkRequest, err = NewBulk(conn.c,
conn.webNetworkRequest, err = NewBulk(conn.c, metrics,
"events_common.requests",
"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success)",
"($%d, $%d, $%d, left($%d, 2700), $%d, $%d, $%d, $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)",

View file

@ -38,7 +38,7 @@ func New(name string) *Metrics {
// initPrometheusDataExporter allows to use collected metrics in prometheus
func (m *Metrics) initPrometheusDataExporter() {
config := prometheus.Config{
DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50},
DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50, 100, 250, 500, 1000},
}
c := controller.New(
processor.NewFactory(
@ -76,8 +76,8 @@ Counter is a synchronous instrument that measures additive non-decreasing values
*/
func (m *Metrics) RegisterCounter(name string) (syncfloat64.Counter, error) {
if _, ok := m.counters[name]; ok {
return nil, fmt.Errorf("counter %s already exists", name)
if counter, ok := m.counters[name]; ok {
return counter, nil
}
counter, err := m.meter.SyncFloat64().Counter(name)
if err != nil {
@ -100,8 +100,8 @@ for example, the number of:
*/
func (m *Metrics) RegisterUpDownCounter(name string) (syncfloat64.UpDownCounter, error) {
if _, ok := m.upDownCounters[name]; ok {
return nil, fmt.Errorf("upDownCounter %s already exists", name)
if counter, ok := m.upDownCounters[name]; ok {
return counter, nil
}
counter, err := m.meter.SyncFloat64().UpDownCounter(name)
if err != nil {
@ -122,8 +122,8 @@ Histogram is a synchronous instrument that produces a histogram from recorded va
*/
func (m *Metrics) RegisterHistogram(name string) (syncfloat64.Histogram, error) {
if _, ok := m.histograms[name]; ok {
return nil, fmt.Errorf("histogram %s already exists", name)
if hist, ok := m.histograms[name]; ok {
return hist, nil
}
hist, err := m.meter.SyncFloat64().Histogram(name)
if err != nil {

View file

@ -36,6 +36,9 @@ function build_crons(){
docker tag ${DOCKER_REPO:-'local'}/crons:${git_sha1} ${DOCKER_REPO:-'local'}/crons:${tag}latest
docker push ${DOCKER_REPO:-'local'}/crons:${tag}latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/crons:${image_tag}
}
echo "completed crons build"
}
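The signing step added across these build scripts is gated on `SIGN_IMAGE` and uses the KMS key exported later in `scripts/` (`SIGN_KEY="awskms:///alias/openreplay-container-sign"`). A sketch of the sign/verify round trip, assuming the image has already been pushed, since cosign signs the registry digest:

```bash
export SIGN_KEY="awskms:///alias/openreplay-container-sign"
# Sign the pushed image with the KMS-held private key.
cosign sign --key "$SIGN_KEY" "$DOCKER_REPO/crons:$IMAGE_TAG"
# Verify against the same key reference (or a cosign.pub exported from it).
cosign verify --key "$SIGN_KEY" "$DOCKER_REPO/crons:$IMAGE_TAG"
```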

View file

@ -14,9 +14,13 @@ COPY nginx.conf /etc/nginx/conf.d/default.conf
# Default step in docker build
FROM nginx:alpine
LABEL maintainer=Rajesh<rajesh@openreplay.com>
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
COPY --from=builder /work/public /var/www/openreplay
COPY nginx.conf /etc/nginx/conf.d/default.conf
ENV GIT_SHA=$GIT_SHA
EXPOSE 8080
RUN chown -R nginx:nginx /var/cache/nginx && \
chown -R nginx:nginx /var/log/nginx && \

View file

@ -89,7 +89,7 @@ const MULTIVIEW_INDEX_PATH = routes.multiviewIndex();
@connect(
(state) => {
const siteId = state.getIn(['site', 'siteId']);
const jwt = state.get('jwt');
const jwt = state.getIn(['user', 'jwt']);
const changePassword = state.getIn(['user', 'account', 'changePassword']);
const userInfoLoading = state.getIn(['user', 'fetchUserInfoRequest', 'loading']);
return {

View file

@ -58,7 +58,7 @@ export const clean = (obj, forbidenValues = [ undefined, '' ]) => {
export default class APIClient {
constructor() {
const jwt = store.getState().get('jwt');
const jwt = store.getState().getIn(['user', 'jwt']);
const siteId = store.getState().getIn([ 'site', 'siteId' ]);
this.init = {
headers: {

View file

@ -1,8 +1,8 @@
import logger from 'App/logger';
import APIClient from './api_client';
import { UPDATE, DELETE } from './duck/jwt';
import { UPDATE_JWT } from './duck/user';
export default (store) => (next) => (action) => {
export default () => (next) => (action) => {
const { types, call, ...rest } = action;
if (!call) {
return next(action);
@ -14,7 +14,7 @@ export default (store) => (next) => (action) => {
return call(client)
.then(async (response) => {
if (response.status === 403) {
next({ type: DELETE });
next({ type: UPDATE_JWT, data: null });
}
if (!response.ok) {
const text = await response.text();
@ -30,7 +30,7 @@ export default (store) => (next) => (action) => {
next({ type: SUCCESS, data, ...rest });
}
if (jwt) {
next({ type: UPDATE, data: jwt });
next({ type: UPDATE_JWT, data: jwt });
}
})
.catch((e) => {

View file

@ -8,7 +8,7 @@ import ReCAPTCHA from 'react-google-recaptcha';
import { withRouter } from 'react-router-dom';
import stl from './login.module.css';
import cn from 'classnames';
import { setJwt } from 'Duck/jwt';
import { setJwt } from 'Duck/user';
const FORGOT_PASSWORD = forgotPassword();
const SIGNUP_ROUTE = signup();

View file

@ -150,6 +150,7 @@ export default connect(
session: state.getIn(['sessions', 'current']),
insights: state.getIn(['sessions', 'insights']),
visitedEvents: state.getIn(['sessions', 'visitedEvents']),
jwt: state.getIn(['user', 'jwt']),
fullscreen: state.getIn(['components', 'player', 'fullscreen']),
showEvents: state.get('showEvents'),
members: state.getIn(['members', 'list']),

View file

@ -1,7 +1,6 @@
// @ts-ignore
import { combineReducers } from 'redux-immutable';
import jwt from './jwt';
import user from './user';
import sessions from './sessions';
import assignments from './assignments';
@ -26,7 +25,6 @@ import search from './search';
import liveSearch from './liveSearch';
const rootReducer = combineReducers({
jwt,
user,
sessions,
assignments,

View file

@ -1,19 +0,0 @@
export const UPDATE = 'jwt/UPDATE';
export const DELETE = 'jwt/DELETE';
export default (state = null, action = {}) => {
switch (action.type) {
case UPDATE:
return action.data;
case DELETE:
return null;
}
return state;
};
export function setJwt(data) {
return {
type: UPDATE,
data,
};
}

View file

@ -2,7 +2,6 @@ import { List, Map } from 'immutable';
import Client from 'Types/client';
import { deleteCookie } from 'App/utils';
import Account from 'Types/account';
import { DELETE } from './jwt';
import withRequestState, { RequestTypes } from './requestStateCreator';
export const LOGIN = new RequestTypes('user/LOGIN');
@ -20,7 +19,7 @@ const PUT_CLIENT = new RequestTypes('user/PUT_CLIENT');
const PUSH_NEW_SITE = 'user/PUSH_NEW_SITE';
const SET_ONBOARDING = 'user/SET_ONBOARDING';
const initialState = Map({
export const initialState = Map({
account: Account(),
siteId: null,
passwordRequestError: false,
@ -28,7 +27,8 @@ const initialState = Map({
tenants: [],
authDetails: {},
onboarding: false,
sites: List()
sites: List(),
jwt: null
});
const setClient = (state, data) => {
@ -36,8 +36,20 @@ const setClient = (state, data) => {
return state.set('client', client)
}
export const UPDATE_JWT = 'jwt/UPDATE';
export const DELETE = 'jwt/DELETE'
export function setJwt(data) {
return {
type: UPDATE_JWT,
data,
};
}
const reducer = (state = initialState, action = {}) => {
switch (action.type) {
case UPDATE_JWT:
return state.set('jwt', action.data);
case RESET_PASSWORD.SUCCESS:
case UPDATE_PASSWORD.SUCCESS:
case LOGIN.SUCCESS:
@ -54,7 +66,10 @@ const reducer = (state = initialState, action = {}) => {
// return state.set('tenants', action.data.map(i => ({ text: i.name, value: i.tenantId})));
case UPDATE_PASSWORD.FAILURE:
return state.set('passwordErrors', List(action.errors))
case FETCH_ACCOUNT.FAILURE:
case LOGIN.FAILURE:
case DELETE:
deleteCookie('jwt', '/', '.openreplay.com')
return initialState;
case PUT_CLIENT.REQUEST:
@ -114,12 +129,10 @@ export function fetchTenants() {
}
}
export const fetchUserInfo = () => dispatch => Promise.all([
dispatch({
export const fetchUserInfo = () => ({
types: FETCH_ACCOUNT.toArray(),
call: client => client.get('/account'),
}),
]);
});
export function logout() {
return {

View file

@ -123,7 +123,10 @@ export default class MessageManager {
private readonly screen: Screen,
initialLists?: Partial<InitialLists>
) {
this.pagesManager = new PagesManager(screen, this.session.isMobile, this)
this.pagesManager = new PagesManager(screen, this.session.isMobile, cssLoading => {
screen.displayFrame(!cssLoading)
state.update({ cssLoading })
})
this.mouseMoveManager = new MouseMoveManager(screen)
this.sessionStart = this.session.startedAt
@ -483,11 +486,6 @@ export default class MessageManager {
this.state.update({ messagesLoading });
}
setCSSLoading(cssLoading: boolean) {
this.screen.displayFrame(!cssLoading);
this.state.update({ cssLoading });
}
private setSize({ height, width }: { height: number, width: number }) {
this.screen.scale({ height, width });
this.state.update({ width, height });

View file

@ -28,7 +28,7 @@ export default class WebLivePlayer extends WebPlayer {
this.assistManager = new AssistManager(
session,
f => this.messageManager.setCSSLoading(f),
f => this.messageManager.setMessagesLoading(f),
(msg, idx) => {
this.incomingMessages.push(msg)
if (!this.historyFileIsLoading) {

View file

@ -56,10 +56,10 @@ export default class DOMManager extends ListWalker<Message> {
private readonly screen: Screen,
private readonly isMobile: boolean,
public readonly time: number,
mm: MessageManager,
setCssLoading: ConstructorParameters<typeof StylesManager>[1],
) {
super()
this.stylesManager = new StylesManager(screen, mm)
this.stylesManager = new StylesManager(screen, setCssLoading)
}
append(m: Message): void {

View file

@ -24,7 +24,7 @@ export default class StylesManager {
private linkLoadPromises: Array<Promise<void>> = [];
private skipCSSLinks: Array<string> = []; // should be common for all pages
constructor(private readonly screen: Screen, private readonly mm: MessageManager) {}
constructor(private readonly screen: Screen, private readonly setLoading: (flag: boolean) => void) {}
reset():void {
this.linkLoadingCount = 0;
@ -38,7 +38,7 @@ export default class StylesManager {
const promise = new Promise<void>((resolve) => {
if (this.skipCSSLinks.includes(value)) resolve();
this.linkLoadingCount++;
this.mm.setCSSLoading(true);
this.setLoading(true);
const addSkipAndResolve = () => {
this.skipCSSLinks.push(value); // watch out
resolve()
@ -57,7 +57,7 @@ export default class StylesManager {
clearTimeout(timeoutId);
this.linkLoadingCount--;
if (this.linkLoadingCount === 0) {
this.mm.setCSSLoading(false);
this.setLoading(false);
}
});
this.linkLoadPromises.push(promise);

View file

@ -10,14 +10,18 @@ import DOMManager from './DOM/DOMManager';
export default class PagesManager extends ListWalker<DOMManager> {
private currentPage: DOMManager | null = null
constructor(private screen: Screen, private isMobile: boolean, private mm: MessageManager) { super() }
constructor(
private screen: Screen,
private isMobile: boolean,
private setCssLoading: ConstructorParameters<typeof DOMManager>[3],
) { super() }
/*
Assumed that messages added in a correct time sequence.
*/
appendMessage(m: Message): void {
if (m.tp === MType.CreateDocument) {
super.append(new DOMManager(this.screen, this.isMobile, m.time, this.mm))
super.append(new DOMManager(this.screen, this.isMobile, m.time, this.setCssLoading))
}
if (this.last === null) {
// Log wrong

View file

@ -4,26 +4,32 @@ import { Map } from 'immutable';
import indexReducer from './duck';
import apiMiddleware from './api_middleware';
import LocalStorage from './local_storage';
import { initialState as initUserState, UPDATE_JWT } from './duck/user'
const storage = new LocalStorage({
jwt: String,
user: Object,
});
const composeEnhancers = window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ && window.env.NODE_ENV === "development"
? window.__REDUX_DEVTOOLS_EXTENSION_COMPOSE__ : compose;
const storageState = storage.state();
const initialState = Map({
jwt: storageState.jwt,
// TODO: store user
});
const initialState = Map({ user: initUserState.update('jwt', () => storageState.user?.jwt || null) });
const store = createStore(indexReducer, initialState, composeEnhancers(applyMiddleware(thunk, apiMiddleware)));
store.subscribe(() => {
const state = store.getState();
storage.sync({
jwt: state.get('jwt')
user: state.get('user')
});
});
window.getJWT = () => {
console.log(JSON.stringify(storage.state().user?.jwt || 'not logged in'));
}
window.setJWT = (jwt) => {
store.dispatch({ type: UPDATE_JWT, data: jwt })
}
export default store;

View file

@ -8,7 +8,8 @@
# Example
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
ee="false"
check_prereq() {
which docker || {
@ -21,11 +22,14 @@ check_prereq() {
export DOCKER_BUILDKIT=1
function build(){
# Run docker as the same user, else we'll run in to permission issues.
docker build -t ${DOCKER_REPO:-'local'}/frontend:${git_sha1} --platform linux/amd64 --build-arg SERVICE_NAME=$image .
docker build -t ${DOCKER_REPO:-'local'}/frontend:${image_tag} --platform linux/amd64 --build-arg GIT_SHA=$git_sha .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/frontend:${git_sha1}
docker push ${DOCKER_REPO:-'local'}/frontend:${image_tag}
}
echo "frotend build completed"
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/frontend:${image_tag}
}
echo "frontend build completed"
}
check_prereq

View file

@ -1,8 +1,11 @@
FROM node:18-alpine
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache tini
ARG envarg
ENV PRIVATE_ENDPOINTS=false \
GIT_SHA=$GIT_SHA \
ENTERPRISE_BUILD=${envarg}
WORKDIR /work

View file

@ -6,7 +6,8 @@
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
@ -26,14 +27,17 @@ function build_api(){
[[ $1 == "ee" ]] && {
cp -rf ../ee/peers/* ./
}
docker build -f ./Dockerfile -t ${DOCKER_REPO:-'local'}/peers:${git_sha1} .
docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/peers:${image_tag} .
cd ../peers
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/peers:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/peers:${git_sha1} ${DOCKER_REPO:-'local'}/peers:latest
docker push ${DOCKER_REPO:-'local'}/peers:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/peers:${image_tag} ${DOCKER_REPO:-'local'}/peers:latest
docker push ${DOCKER_REPO:-'local'}/peers:latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/peers:${image_tag}
}
echo "peer docker build complted"
}

View file

@ -8,6 +8,12 @@ set -e
# Removing local alpine:latest image
docker rmi alpine || true
# Signing image
# cosign sign --key awskms:///alias/openreplay-container-sign image_url:tag
export SIGN_IMAGE=1
export PUSH_IMAGE=1
export SIGN_KEY="awskms:///alias/openreplay-container-sign"
echo $DOCKER_REPO
[[ -z $DOCKER_REPO ]] && {
echo Set DOCKER_REPO="your docker registry"
@ -15,15 +21,15 @@ echo $DOCKER_REPO
} || {
docker login $DOCKER_REPO
cd ../../backend
PUSH_IMAGE=1 bash build.sh $@
bash build.sh $@
cd ../utilities
PUSH_IMAGE=1 bash build.sh $@
bash build.sh $@
cd ../peers
PUSH_IMAGE=1 bash build.sh $@
bash build.sh $@
cd ../frontend
PUSH_IMAGE=1 bash build.sh $@
bash build.sh $@
cd ../sourcemap-reader
PUSH_IMAGE=1 bash build.sh $@
bash build.sh $@
cd ../api
PUSH_IMAGE=1 bash build.sh $@
bash build.sh $@
}
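The registry guard above (with the `$DOCKER_REPO` fix applied) leans on `cond && { A } || { B }` as an if/else. The idiom is subtly different: `B` also runs when `A` itself exits non-zero, not only when the condition is false. An explicit `if` is the safer shape of the same check, shown here as a sketch:

```bash
# `[[ -z $DOCKER_REPO ]] && { A } || { B }` runs B whenever the
# && branch fails, not only when DOCKER_REPO is set; if/else does not.
if [[ -z $DOCKER_REPO ]]; then
    echo 'Set DOCKER_REPO="your docker registry"' >&2
    exit 1
else
    docker login "$DOCKER_REPO"
fi
```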

View file

@ -7,12 +7,7 @@ set -eE -o pipefail # same as: `set -o errexit -o errtrace`
trap err EXIT
err() {
case "$?" in
0)
;;
*)
;;
esac
exit $1
}
# make all stderr red
@ -47,7 +42,7 @@ cat <<"EOF"
EOF
echo -e "${green}Usage: openreplay-cli [ -h | --help ]
echo -e "${green}Usage: [DEBUG=1|SKIP_MIGRAION=1] openreplay-cli [ -h | --help ]
[ -d | --status ]
[ -v | --verbose ]
[ -l | --logs SERVICE ]
@ -96,11 +91,10 @@ restart() {
helmInstall() {
[[ FORCE_UPGRADE_FRONTENT -eq 1 ]] && {
helm upgrade --install openreplay -n app openreplay -f vars.yaml --set forceUpgradeFrontend=true
} || {
helm upgrade --install openreplay -n app openreplay -f vars.yaml
}
# Adding variables
[[ $SKIP_MIGRATION -eq 1 ]] && ARGS="--set skipMigration=true"
[[ $DEBUG -eq 1 ]] && ARGS="$ARGS --debug"
helm upgrade --install openreplay -n app openreplay -f vars.yaml $ARGS
}
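`helmInstall` now accumulates optional flags in a flat `$ARGS` string, which must be expanded unquoted and would break on any flag containing embedded spaces. A bash-array variant of the same pattern avoids both constraints:

```bash
# Collect optional helm flags as array elements; "${ARGS[@]}" then
# expands each one as a single word, with no IFS surprises.
ARGS=()
[[ $SKIP_MIGRATION -eq 1 ]] && ARGS+=(--set skipMigration=true)
[[ $DEBUG -eq 1 ]] && ARGS+=(--debug)
helm upgrade --install openreplay -n app openreplay -f vars.yaml "${ARGS[@]}"
```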
upgrade() {
@ -145,4 +139,4 @@ do
esac
done
[[ $VERBOSE -eq 1 ]] && set -x
[[ $VERBOSE -eq 1 ]] && set -x || true
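The `|| true` appended to the last line matters because the CLI runs under `set -eE` with a `trap err EXIT`: when `VERBOSE` is unset, the failing `[[ ... ]]` test would otherwise be the script's final command and the script would exit with status 1. A tiny reproduction:

```bash
#!/usr/bin/env bash
set -eE -o pipefail
trap 'echo "exit status: $?"' EXIT
# Without `|| true`, this last line leaves the script exiting 1
# whenever VERBOSE is unset or 0; with it, the status is always 0.
[[ ${VERBOSE:-0} -eq 1 ]] && set -x || true
```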

View file

@ -1,73 +1,6 @@
# Default values for openreplay.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
securityContext:
runAsUser: 1001
runAsGroup: 1001
podSecurityContext:
runAsUser: 1001
runAsGroup: 1001
fsGroup: 1001
fsGroupChangePolicy: "OnRootMismatch"
# podSecurityContext: {}
# fsGroup: 2000
# securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
redis: &redis
tls:
enabled: false
ingress-nginx:
enabled: true
@ -103,4 +36,5 @@ vault: &vault
global:
vault: *vault
redis: *redis
clusterDomain: "svc.cluster.local"

View file

@ -24,6 +24,16 @@ spec:
- keys:
publicKeys: |-
-----BEGIN PUBLIC KEY-----
MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuSUrc90YHUpXwB2E7Hu080K6z+Yc
esqGVAEESg9lEjQUaxOUqRkW3nI/vXRQayLEfBs6ugPNqCH+DbuarI9Jkg==
MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAoLidzRiNIO3l/sWCYw2f
Ct71YSj7UVerhbR81TNEKYtW0fUqg4GagS+esprcXteHPoBI+ZcfL2xJIs0ZNHZs
A+2VXYrsFRgREtABFCwJ2G51ybusoS3jpBsAmSNjG0uzseDxQMTh0arNOlNbhbmI
Tj1ty2JfyLejDKlxavXheKmJGb+7IdDCMmP3f5mXSsJpsOM8SJo49BkvKhTwzjc0
01dsSLo5mk9jeG2C6UvPCQeMIUKaf5GlYWyFx7vLZ+z5be9TPuWDH4GO0RtxJVXt
tqmk32aKe+0KDLH0ak9WRVz3ugYEjs+tqdO3y3ALLoGAAI+yGxGSfWFDnDj5AXpA
2/XYSJAWRzPu35/H3laSrxaApYWN5an69jI30JY7SoEy/k+10oIGe2FGIihXTdq+
As3IKPEtvuN9s3RTm2ujV/7rEnVVKWiHvQCwH8rxhsbDTeJCoNs8hSBUq1Muttct
EWML8s/TCIK01PyvH6VNQSnc+lRKAJOd5NpZ/SVMXBbrykCQSZPE8RcaQum3nMxE
Tri24VcWfRHj1WwUYzxpmoVE5F1lw0lqQIXlwz+AFhCLGsePSkjFShFtNFQuX22r
Q73JTt3FX4JEzaaKC5BZwXmkEs3MVpQj43HuEqDyejlsPWwRBYwZIzXpoBhOCFHD
t4PI8n+1dSE+uavu/ijgXl8CAwEAAQ==
-----END PUBLIC KEY-----

View file

@ -110,7 +110,7 @@ global:
assistJWTSecret: "SetARandomStringHere"
s3:
region: "us-east-1"
endpoint: "http://minio.db.svc.cluster.local:9000"
endpoint: "http://minio.openreplay.svc.cluster.local:9000"
assetsBucket: "sessions-assets"
recordingsBucket: "mobs"
sourcemapsBucket: "sourcemaps"

View file

@ -1,5 +1,7 @@
FROM node:18-alpine
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache tini
ARG envarg
@ -8,6 +10,7 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
LISTEN_PORT=9000 \
MAPPING_WASM=/work/mappings.wasm \
PRIVATE_ENDPOINTS=true \
GIT_SHA=$GIT_SHA \
ENTERPRISE_BUILD=${envarg}
ADD https://unpkg.com/source-map@${SOURCE_MAP_VERSION}/lib/mappings.wasm ${MAPPING_WASM}

View file

@ -10,7 +10,8 @@ set -e
image_name="sourcemaps-reader"
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
envarg="default-foss"
tmp_folder_name="${image_name}_${RANDOM}"
@ -37,14 +38,17 @@ function build_api(){
envarg="default-ee"
tag="ee-"
}
docker build -f ./Dockerfile --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/${image_name}:${git_sha1} .
docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/${image_name}:${image_tag} .
cd ../sourcemap-reader
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/${image_name}:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/${image_name}:${git_sha1} ${DOCKER_REPO:-'local'}/${image_name}:${tag}latest
docker push ${DOCKER_REPO:-'local'}/${image_name}:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/${image_name}:${image_tag} ${DOCKER_REPO:-'local'}/${image_name}:${tag}latest
docker push ${DOCKER_REPO:-'local'}/${image_name}:${tag}latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/$image_name:${image_tag}
}
echo "${image_name} docker build completed"
}

View file

@ -1,11 +1,14 @@
FROM node:18-alpine
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache tini git libc6-compat && ln -s /lib/libc.musl-x86_64.so.1 /lib/ld-linux-x86-64.so.2
ARG envarg
ENV ENTERPRISE_BUILD=${envarg} \
MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \
PRIVATE_ENDPOINTS=false \
GIT_SHA=$GIT_SHA \
LISTEN_PORT=9001
WORKDIR /work
COPY package.json .

View file

@ -6,7 +6,8 @@
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
@ -26,15 +27,18 @@ function build_api(){
[[ $1 == "ee" ]] && {
cp -rf ../ee/utilities/* ./
}
docker build -f ./Dockerfile -t ${DOCKER_REPO:-'local'}/assist:${git_sha1} .
docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/assist:${image_tag} .
cd ../utilities
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/assist:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/assist:${git_sha1} ${DOCKER_REPO:-'local'}/assist:latest
docker push ${DOCKER_REPO:-'local'}/assist:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/assist:${image_tag} ${DOCKER_REPO:-'local'}/assist:latest
docker push ${DOCKER_REPO:-'local'}/assist:latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/assist:${image_tag}
}
echo "build completed for assist"
}