feat(assist-server): added a first part of the assist v2 (#3269)

Alexander 2025-04-04 12:05:36 +02:00 committed by GitHub
parent 5a51bfb984
commit c1d51b98a2
23 changed files with 3166 additions and 0 deletions

121
.github/workflows/assist-server-ee.yaml vendored Normal file

@@ -0,0 +1,121 @@
# This action builds and deploys the assist-server changes to AWS
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
required: false
default: "false"
push:
branches:
- dev
paths:
- "ee/assist-server/**"
name: Build and Deploy Assist-Server EE
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with the old commit
# to see which workers changed.
fetch-depth: 2
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing Assist-Server image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd ee/assist-server
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist-server")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("assist-server")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We have to strip off the -ee suffix, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
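# The generated /tmp/image_override.yaml ends up looking like this (hypothetical charts/tags):
#   chalice:
#     image:
#       tag: v1.22.0
#   frontend:
#     image:
#       tag: v1.22.0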
- name: Deploy to kubernetes
run: |
cd ../../scripts/helmcharts/
# Update changed image tag
sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
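# NOTE: $k_version is assumed to be set in the environment (e.g. by an earlier step); it is not defined in this workflow.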
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing the -ee suffix, because helm will add it.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

5
ee/assist-server/.gitignore vendored Normal file

@@ -0,0 +1,5 @@
.idea
node_modules
npm-debug.log
.cache
*.mmdb

24
ee/assist-server/Dockerfile Normal file

@@ -0,0 +1,24 @@
ARG ARCH=amd64
FROM --platform=linux/$ARCH node:23-alpine
LABEL Maintainer="Zavorotynskiy Alexander <zavorotynskiy@pm.me>"
RUN apk add --no-cache tini git libc6-compat
ARG envarg
ENV ENTERPRISE_BUILD=${envarg} \
MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \
PRIVATE_ENDPOINTS=false \
LISTEN_PORT=9001 \
ERROR=1 \
NODE_ENV=production
WORKDIR /work
COPY package.json .
COPY package-lock.json .
RUN npm install
COPY . .
RUN adduser -u 1001 openreplay -D
USER 1001
ADD --chown=1001 https://static.openreplay.com/geoip/GeoLite2-City.mmdb $MAXMINDDB_FILE
ENTRYPOINT ["/sbin/tini", "--"]
CMD npm start

168
ee/assist-server/app/assist.js Normal file

@@ -0,0 +1,168 @@
const jwt = require('jsonwebtoken');
const uaParser = require('ua-parser-js');
const {geoip} = require('./geoIP');
const {logger} = require('./logger');
let PROJECT_KEY_LENGTH = parseInt(process.env.PROJECT_KEY_LENGTH) || 20;
const IDENTITIES = {agent: 'agent', session: 'session'};
const EVENTS_DEFINITION = {
listen: {
UPDATE_EVENT: "UPDATE_SESSION", // tab becomes active/inactive, page title changes, session object changes (rare), call starts/ends
WEBRTC_AGENT_CALL: "WEBRTC_AGENT_CALL", // assumed event name: socket.js listens on this key, which was missing from the definition
CONNECT_ERROR: "connect_error",
CONNECT_FAILED: "connect_failed",
ERROR: "error"
},
// The following events are emitted only by the server
server: {
UPDATE_SESSION: "SERVER_UPDATE_SESSION"
}
};
EVENTS_DEFINITION.emit = {
NEW_AGENT: "NEW_AGENT",
NO_AGENTS: "NO_AGENT",
AGENT_DISCONNECT: "AGENT_DISCONNECTED",
AGENTS_CONNECTED: "AGENTS_CONNECTED",
NO_SESSIONS: "SESSION_DISCONNECTED",
SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
SESSION_RECONNECTED: "SESSION_RECONNECTED",
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT
};
const BASE_sessionInfo = {
"pageTitle": "Page",
"active": false,
"live": true,
"sessionID": "0",
"metadata": {},
"userID": "",
"userUUID": "",
"projectKey": "",
"revID": "",
"timestamp": 0,
"trackerVersion": "",
"isSnippet": true,
"userOs": "",
"userBrowser": "",
"userBrowserVersion": "",
"userDevice": "",
"userDeviceType": "",
"userCountry": "",
"userState": "",
"userCity": "",
"projectId": 0
};
const extractPeerId = (peerId) => {
const parts = peerId.split("-");
if (parts.length < 2 || parts.length > 3) {
logger.debug(`Invalid peerId format: ${peerId}`);
return {};
}
if (PROJECT_KEY_LENGTH > 0 && parts[0].length !== PROJECT_KEY_LENGTH) {
logger.debug(`Invalid project key length in peerId: ${peerId}`);
return {};
}
const [projectKey, sessionId, tabId = generateRandomTabId()] = parts;
return { projectKey, sessionId, tabId };
};
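// Example (hypothetical values; the project key uses the default 20-character length):
//   extractPeerId("aBcDeFgHiJkLmNoPqRsT-12345-tab1")
//   => { projectKey: "aBcDeFgHiJkLmNoPqRsT", sessionId: "12345", tabId: "tab1" }
// A two-part peerId gets a randomly generated tabId.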
const generateRandomTabId = () => (Math.random() + 1).toString(36).substring(2);
function processPeerInfo(socket) {
socket._connectedAt = new Date();
const { projectKey, sessionId, tabId } = extractPeerId(socket.handshake.query.peerId || "");
Object.assign(socket.handshake.query, {
roomId: projectKey && sessionId ? `${projectKey}-${sessionId}` : null,
projectKey,
sessId: sessionId,
tabId
});
logger.debug(`Connection details: projectKey:${projectKey}, sessionId:${sessionId}, tabId:${tabId}, roomId:${socket.handshake.query.roomId}`);
}
/**
* Extracts session information and populates the socket with it
* @param {Socket} socket - the socket to populate
* */
const extractSessionInfo = function (socket) {
if (socket.handshake.query.sessionInfo !== undefined) {
logger.debug(`received headers: ${JSON.stringify(socket.handshake.headers)}`);
socket.handshake.query.sessionInfo = JSON.parse(socket.handshake.query.sessionInfo);
socket.handshake.query.sessionInfo = {...BASE_sessionInfo, ...socket.handshake.query.sessionInfo};
let ua = uaParser(socket.handshake.headers['user-agent']);
socket.handshake.query.sessionInfo.userOs = ua.os.name || null;
socket.handshake.query.sessionInfo.userBrowser = ua.browser.name || null;
socket.handshake.query.sessionInfo.userBrowserVersion = ua.browser.version || null;
socket.handshake.query.sessionInfo.userDevice = ua.device.model || null;
socket.handshake.query.sessionInfo.userDeviceType = ua.device.type || 'desktop';
socket.handshake.query.sessionInfo.userCountry = null;
socket.handshake.query.sessionInfo.userState = null;
socket.handshake.query.sessionInfo.userCity = null;
if (geoip() !== null) {
logger.debug(`looking for location of ${socket.handshake.headers['x-forwarded-for'] || socket.handshake.address}`);
try {
let ip = socket.handshake.headers['x-forwarded-for'] || socket.handshake.address;
ip = ip.split(",")[0];
let info = geoip().city(ip);
socket.handshake.query.sessionInfo.userCountry = info.country.isoCode;
socket.handshake.query.sessionInfo.userCity = info.city.names.en;
socket.handshake.query.sessionInfo.userState = info.subdivisions.length > 0 ? info.subdivisions[0].names.en : null;
} catch (e) {
logger.debug(`geoip lookup failed: ${e}`);
}
}
}
}
function errorHandler(listenerName, error) {
logger.error(`Error detected from ${listenerName}\n${error}`);
}
const JWT_TOKEN_PREFIX = "Bearer ";
function check(socket, next) {
if (socket.handshake.query.identity === IDENTITIES.session) {
return next();
}
if (socket.handshake.query.peerId && socket.handshake.auth && socket.handshake.auth.token) {
let token = socket.handshake.auth.token;
if (token.startsWith(JWT_TOKEN_PREFIX)) {
token = token.substring(JWT_TOKEN_PREFIX.length);
}
jwt.verify(token, process.env.ASSIST_JWT_SECRET, (err, decoded) => {
logger.debug(`JWT payload: ${JSON.stringify(decoded)}`);
if (err) {
logger.debug(err);
return next(new Error('Authentication error'));
}
const {projectKey, sessionId} = extractPeerId(socket.handshake.query.peerId);
if (!projectKey || !sessionId) {
logger.debug(`Missing attribute: projectKey:${projectKey}, sessionId:${sessionId}`);
return next(new Error('Authentication error'));
}
if (String(projectKey) !== String(decoded.projectKey) || String(sessionId) !== String(decoded.sessionId)) {
logger.debug(`peerId mismatch: projectKey:${projectKey} vs token's ${decoded.projectKey}, sessionId:${sessionId} vs token's ${decoded.sessionId}`);
return next(new Error('Authorization error'));
}
socket.decoded = decoded;
return next();
});
} else {
logger.debug(`something missing in handshake: ${JSON.stringify(socket.handshake)}`);
return next(new Error('Authentication error'));
}
}
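// Sketch of the agent token this check expects (assumed claims and TTL):
//   jwt.sign({ projectKey, sessionId }, process.env.ASSIST_JWT_SECRET, { expiresIn: "30m" })
// sent by the client as auth: { token: "Bearer <jwt>" }.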
module.exports = {
processPeerInfo,
extractPeerId,
extractSessionInfo,
EVENTS_DEFINITION,
IDENTITIES,
errorHandler,
authorizer: {check}
};

106
ee/assist-server/app/cache.js Normal file

@@ -0,0 +1,106 @@
const {logger} = require('./logger');
const {createClient} = require("redis");
const crypto = require("crypto");
let redisClient;
const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');
redisClient = createClient({url: REDIS_URL});
redisClient.on("error", (error) => logger.error(`Redis cache error : ${error}`));
void redisClient.connect();
function generateNodeID() {
const buffer = crypto.randomBytes(8);
return "node_"+buffer.readBigUInt64BE(0).toString();
}
const pingInterval = parseInt(process.env.PING_INTERVAL) || 25000;
const CACHE_REFRESH_INTERVAL = parseInt(process.env.cacheRefreshInterval) || 10000;
let lastCacheUpdateTime = 0;
let cacheRefresher = null;
const nodeID = process.env.HOSTNAME || generateNodeID();
const addSessionToCache = async function (sessionID, sessionData) {
try {
// node-redis v4 takes SET options as an object; PX keeps the TTL in milliseconds (two ping intervals)
await redisClient.set(`active_sessions:${sessionID}`, JSON.stringify(sessionData), {PX: pingInterval * 2});
logger.debug(`Session ${sessionID} stored in Redis`);
} catch (error) {
logger.error(error);
}
}
const renewSession = async function (sessionID){
try {
// pExpire takes milliseconds, matching pingInterval's units
await redisClient.pExpire(`active_sessions:${sessionID}`, pingInterval * 2);
logger.debug(`Session ${sessionID} renewed in Redis`);
} catch (error) {
logger.error(error);
}
}
const getSessionFromCache = async function (sessionID) {
try {
const sessionData = await redisClient.get(`active_sessions:${sessionID}`);
if (sessionData) {
logger.debug(`Session ${sessionID} retrieved from Redis`);
return JSON.parse(sessionData);
}
return null;
} catch (error) {
logger.error(error);
return null;
}
}
const removeSessionFromCache = async function (sessionID) {
try {
await redisClient.del(`active_sessions:${sessionID}`);
logger.debug(`Session ${sessionID} removed from Redis`);
} catch (error) {
logger.error(error);
}
}
const setNodeSessions = async function (nodeID, sessionIDs) {
try {
await redisClient.set(`node:${nodeID}:sessions`, JSON.stringify(sessionIDs), {PX: CACHE_REFRESH_INTERVAL * 2});
logger.debug(`Node ${nodeID} sessions stored in Redis`);
} catch (error) {
logger.error(error);
}
}
function startCacheRefresher(io) {
if (cacheRefresher) clearInterval(cacheRefresher);
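// The timer fires every CACHE_REFRESH_INTERVAL / 2, and the guard below skips runs that come
// sooner than CACHE_REFRESH_INTERVAL after the last update, so a due refresh is at most half
// an interval late.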
cacheRefresher = setInterval(async () => {
const now = Date.now();
if (now - lastCacheUpdateTime < CACHE_REFRESH_INTERVAL) {
return;
}
logger.debug('Background refresh triggered');
try {
const startTime = performance.now();
const sessionIDs = new Set();
const result = await io.fetchSockets();
result.forEach((r) => {
if (r.handshake.query.sessionID) {
sessionIDs.add(r.handshake.query.sessionID);
}
})
await setNodeSessions(nodeID, Array.from(sessionIDs));
lastCacheUpdateTime = now;
const duration = performance.now() - startTime;
logger.info(`Background refresh complete: ${duration}ms, ${result.length} sockets`);
} catch (error) {
logger.error(`Background refresh error: ${error}`);
}
}, CACHE_REFRESH_INTERVAL / 2);
}
module.exports = {
addSessionToCache,
renewSession,
getSessionFromCache,
removeSessionFromCache,
startCacheRefresher,
}

21
ee/assist-server/app/geoIP.js Normal file

@@ -0,0 +1,21 @@
const geoip2Reader = require('@maxmind/geoip2-node').Reader;
const {logger} = require('./logger');
let geoip = null;
if (process.env.MAXMINDDB_FILE !== undefined) {
geoip2Reader.open(process.env.MAXMINDDB_FILE, {})
.then(reader => {
geoip = reader;
})
.catch(error => {
logger.error(`Error while opening the MAXMINDDB_FILE, err: ${error}`);
});
} else {
logger.error("!!! please provide a valid value for MAXMINDDB_FILE env var.");
}
module.exports = {
geoip: () => {
return geoip;
}
}

23
ee/assist-server/app/logger.js Normal file

@@ -0,0 +1,23 @@
const winston = require('winston');
const isDebugMode = process.env.debug === "1";
const logLevel = isDebugMode ? 'debug' : 'info';
const logger = winston.createLogger({
level: logLevel,
format: winston.format.combine(
winston.format.timestamp({
format: 'YYYY-MM-DD HH:mm:ss.SSS' // The same format as in backend services
}),
winston.format.errors({stack: true}),
winston.format.json()
),
defaultMeta: {service: process.env.SERVICE_NAME || 'assist'},
transports: [
new winston.transports.Console(),
],
});
module.exports = {
logger,
}

254
ee/assist-server/app/socket.js Normal file

@@ -0,0 +1,254 @@
const {
processPeerInfo,
IDENTITIES,
EVENTS_DEFINITION,
extractSessionInfo,
errorHandler
} = require("./assist");
const {
addSessionToCache,
renewSession,
removeSessionFromCache
} = require('./cache');
const {
logger
} = require('./logger');
const deepMerge = require('@fastify/deepmerge')({all: true});
let io;
const setSocketIOServer = function (server) {
io = server;
}
function sendFrom(from, to, eventName, ...data) {
from.to(to).emit(eventName, ...data);
}
function sendTo(to, eventName, ...data) {
sendFrom(io, to, eventName, ...data);
}
const fetchSockets = async function (roomID) {
if (!io) {
return [];
}
try {
if (roomID) {
return await io.in(roomID).fetchSockets();
} else {
return await io.fetchSockets();
}
} catch (error) {
logger.error('Error fetching sockets:', error);
return [];
}
}
const findSessionSocketId = async (roomId, tabId) => {
let pickFirstSession = tabId === undefined;
const connected_sockets = await fetchSockets(roomId);
for (let socket of connected_sockets) {
if (socket.handshake.query.identity === IDENTITIES.session) {
if (pickFirstSession) {
return socket.id;
} else if (socket.handshake.query.tabId === tabId) {
return socket.id;
}
}
}
return null;
};
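// Counts tabs (sessions) and agents in a room; tabsCount === -1 && agentsCount === -1 means the room has no sockets at all.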
async function getRoomData(roomID) {
let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [];
const connected_sockets = await fetchSockets(roomID);
if (connected_sockets.length > 0) {
for (let socket of connected_sockets) {
if (socket.handshake.query.identity === IDENTITIES.session) {
tabsCount++;
tabIDs.push(socket.handshake.query.tabId);
} else {
agentsCount++;
agentIDs.push(socket.id);
}
}
} else {
tabsCount = -1;
agentsCount = -1;
}
return {tabsCount, agentsCount, tabIDs, agentIDs};
}
async function onConnect(socket) {
logger.debug(`A new client:${socket.id}, Query:${JSON.stringify(socket.handshake.query)}`);
// Drop unknown socket.io connections
if (socket.handshake.query.identity === undefined || socket.handshake.query.peerId === undefined || socket.handshake.query.sessionInfo === undefined) {
logger.debug(`something is undefined, refusing connection`);
return socket.disconnect();
}
processPeerInfo(socket);
const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(socket.handshake.query.roomId);
if (socket.handshake.query.identity === IDENTITIES.session) {
// Check if a session with the same tabID is already connected; if so, refuse the new connection
if (tabsCount > 0) {
for (let tab of tabIDs) {
if (tab === socket.handshake.query.tabId) {
logger.debug(`session already connected, refusing new connection, peerId: ${socket.handshake.query.peerId}`);
sendTo(socket.id, EVENTS_DEFINITION.emit.SESSION_ALREADY_CONNECTED);
return socket.disconnect();
}
}
}
extractSessionInfo(socket);
if (tabsCount < 0) {
// New session creates new room
}
// Inform all connected agents about reconnected session
if (agentsCount > 0) {
logger.debug(`notifying new session about agent-existence`);
sendTo(socket.id, EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
}
} else if (tabsCount <= 0) {
logger.debug(`notifying new agent about no SESSIONS with peerId:${socket.handshake.query.peerId}`);
sendTo(socket.id, EVENTS_DEFINITION.emit.NO_SESSIONS);
}
await socket.join(socket.handshake.query.roomId);
logger.debug(`${socket.id} joined room:${socket.handshake.query.roomId}, as:${socket.handshake.query.identity}, connections:${agentsCount + tabsCount + 1}`)
// Add session to cache
if (socket.handshake.query.identity === IDENTITIES.session) {
await addSessionToCache(socket.handshake.query.sessId, socket.handshake.query.sessionInfo);
}
if (socket.handshake.query.identity === IDENTITIES.agent) {
if (socket.handshake.query.agentInfo !== undefined) {
socket.handshake.query.agentInfo = JSON.parse(socket.handshake.query.agentInfo);
socket.handshake.query.agentID = socket.handshake.query.agentInfo.id;
}
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
}
socket.conn.on("packet", (packet) => {
if (packet.type === 'pong') {
renewSession(socket.handshake.query.sessId);
}
});
// Set disconnect handler
socket.on('disconnect', () => onDisconnect(socket));
// Handle update event
socket.on(EVENTS_DEFINITION.listen.UPDATE_EVENT, (...args) => onUpdateEvent(socket, ...args));
// Handle webrtc events
socket.on(EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, (...args) => onWebrtcAgentHandler(socket, ...args));
// Handle errors
socket.on(EVENTS_DEFINITION.listen.ERROR, err => errorHandler(EVENTS_DEFINITION.listen.ERROR, err));
socket.on(EVENTS_DEFINITION.listen.CONNECT_ERROR, err => errorHandler(EVENTS_DEFINITION.listen.CONNECT_ERROR, err));
socket.on(EVENTS_DEFINITION.listen.CONNECT_FAILED, err => errorHandler(EVENTS_DEFINITION.listen.CONNECT_FAILED, err));
// Handle all other events (usually dom's mutations and user's actions)
socket.onAny((eventName, ...args) => onAny(socket, eventName, ...args));
}
async function onDisconnect(socket) {
logger.debug(`${socket.id} disconnected from ${socket.handshake.query.roomId}`);
if (socket.handshake.query.identity === IDENTITIES.agent) {
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.AGENT_DISCONNECT, socket.id);
}
logger.debug("checking for number of connected agents and sessions");
let {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(socket.handshake.query.roomId);
if (tabsCount <= 0) {
await removeSessionFromCache(socket.handshake.query.sessId);
}
if (tabsCount === -1 && agentsCount === -1) {
logger.debug(`room not found: ${socket.handshake.query.roomId}`);
return;
}
if (tabsCount === 0) {
logger.debug(`notifying everyone in ${socket.handshake.query.roomId} about no SESSIONS`);
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NO_SESSIONS);
}
if (agentsCount === 0) {
logger.debug(`notifying everyone in ${socket.handshake.query.roomId} about no AGENTS`);
sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NO_AGENTS);
}
}
async function onUpdateEvent(socket, ...args) {
logger.debug(`${socket.id} sent update event.`);
if (socket.handshake.query.identity !== IDENTITIES.session) {
logger.debug('Ignoring update event.');
return
}
args[0] = updateSessionData(socket, args[0])
socket.handshake.query.sessionInfo = deepMerge(socket.handshake.query.sessionInfo, args[0]?.data, {tabId: args[0]?.meta?.tabId});
// update session cache
await addSessionToCache(socket.handshake.query.sessId, socket.handshake.query.sessionInfo);
// Update sessionInfo for all agents in the room
const connected_sockets = await fetchSockets(socket.handshake.query.roomId);
for (let item of connected_sockets) {
if (item.handshake.query.identity === IDENTITIES.session && item.handshake.query.sessionInfo) {
item.handshake.query.sessionInfo = deepMerge(item.handshake.query.sessionInfo, args[0]?.data, {tabId: args[0]?.meta?.tabId});
} else if (item.handshake.query.identity === IDENTITIES.agent) {
sendFrom(socket, item.id, EVENTS_DEFINITION.emit.UPDATE_EVENT, args[0]);
}
}
}
async function onWebrtcAgentHandler(socket, ...args) {
if (socket.handshake.query.identity === IDENTITIES.agent) {
const agentIdToConnect = args[0]?.data?.toAgentId;
logger.debug(`${socket.id} sent webrtc event to agent:${agentIdToConnect}`);
if (agentIdToConnect && socket.handshake.sessionData.AGENTS_CONNECTED.includes(agentIdToConnect)) {
sendFrom(socket, agentIdToConnect, EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, args[0]);
}
}
}
async function onAny(socket, eventName, ...args) {
if (Object.values(EVENTS_DEFINITION.listen).indexOf(eventName) >= 0) {
logger.debug(`received event:${eventName}, should be handled by another listener, stopping onAny.`);
return
}
args[0] = updateSessionData(socket, args[0])
if (socket.handshake.query.identity === IDENTITIES.session) {
logger.debug(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to room:${socket.handshake.query.roomId}`);
sendFrom(socket, socket.handshake.query.roomId, eventName, args[0]);
} else {
logger.debug(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to session of room:${socket.handshake.query.roomId}`);
let socketId = await findSessionSocketId(socket.handshake.query.roomId, args[0]?.meta?.tabId);
if (socketId === null) {
logger.debug(`session not found for:${socket.handshake.query.roomId}`);
sendTo(socket.id, EVENTS_DEFINITION.emit.NO_SESSIONS);
} else {
logger.debug("message sent");
sendTo(socket.id, eventName, socket.id, args[0]);
}
}
}
// Backward compatibility (wrap the payload in a top layer with meta information)
function updateSessionData(socket, sessionData) {
if (sessionData?.meta === undefined && socket.handshake.query.identity === IDENTITIES.session) {
sessionData = {meta: {tabId: socket.handshake.query.tabId, version: 1}, data: sessionData};
}
return sessionData
}
module.exports = {
onConnect,
setSocketIOServer,
}
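// Minimal client sketch (assumed URL and placeholder values; socket.io-client is already a dependency):
//   const { io } = require("socket.io-client");
//   const session = io("https://openreplay.example.com", {
//       path: "/assist/socket",
//       query: { identity: "session", peerId: "<projectKey>-<sessionId>-<tabId>", sessionInfo: "{}" }
//   });
//   const agent = io("https://openreplay.example.com", {
//       path: "/assist/socket",
//       query: { identity: "agent", peerId: "<projectKey>-<sessionId>" },
//       auth: { token: "Bearer <agent JWT>" }
//   });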

65
ee/assist-server/build.sh Normal file

@@ -0,0 +1,65 @@
#!/bin/bash
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
git_sha=$(git rev-parse --short HEAD)
image_tag=${IMAGE_TAG:-$git_sha}
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit 1
}
}
source ../scripts/lib/_docker.sh
[[ $1 == ee ]] && ee=true
[[ $PATCH -eq 1 ]] && {
image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
[[ $ee == "true" ]] && {
image_tag="${image_tag}-ee"
}
}
update_helm_release() {
chart=$1
HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
# Update the chart version
sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Update image tags
sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
# Commit the changes
git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
git commit -m "chore(helm): Updating $chart image release"
}
function build_api() {
destination="_assist-server"
[[ $1 == "ee" ]] && {
destination="_assist-server_ee"
}
[[ -d ../${destination} ]] && {
echo "Removing previous build cache"
rm -rf ../${destination}
}
cp -R ../ee/assist-server ../${destination}
cd ../${destination}
docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/assist-server:${image_tag} .
cd ../assist
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/assist-server:${image_tag}
docker tag ${DOCKER_REPO:-'local'}/assist-server:${image_tag} ${DOCKER_REPO:-'local'}/assist-server:latest
docker push ${DOCKER_REPO:-'local'}/assist-server:latest
}
[[ $SIGN_IMAGE -eq 1 ]] && {
cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/assist-server:${image_tag}
}
echo "build completed for assist-server"
}
check_prereq
build_api $1
if [[ $PATCH -eq 1 ]]; then
update_helm_release assist-server
fi

1761
ee/assist-server/package-lock.json generated Normal file

File diff suppressed because it is too large

24
ee/assist-server/package.json Normal file

@@ -0,0 +1,24 @@
{
"name": "assist-server",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"dependencies": {
"@fastify/deepmerge": "^3.0.0",
"@maxmind/geoip2-node": "^6.0.0",
"express": "^4.21.2",
"jsonwebtoken": "^9.0.2",
"redis": "^4.7.0",
"socket.io": "^4.8.1",
"socket.io-client": "^4.8.1",
"ua-parser-js": "^2.0.3",
"uWebSockets.js": "github:uNetworking/uWebSockets.js#v20.51.0",
"winston": "^3.17.0"
}
}

75
ee/assist-server/server.js Normal file

@@ -0,0 +1,75 @@
const { App } = require('uWebSockets.js');
const { Server } = require('socket.io');
const { logger } = require("./app/logger");
const { authorizer } = require("./app/assist");
const { onConnect, setSocketIOServer } = require("./app/socket");
const { startCacheRefresher } = require("./app/cache");
// Create uWebSockets.js app
const app = App();
const prefix = process.env.PREFIX || process.env.prefix || `/assist`;
const pingInterval = parseInt(process.env.PING_INTERVAL) || 5000;
const getCompressionConfig = function () {
// WS: The theoretical overhead per socket is 19KB (11KB for compressor and 8KB for decompressor)
let perMessageDeflate = false;
if (process.env.COMPRESSION === "true") {
logger.info(`WS compression: enabled`);
perMessageDeflate = {
zlibDeflateOptions: {
windowBits: 10,
memLevel: 1
},
zlibInflateOptions: {
windowBits: 10
}
}
} else {
logger.info(`WS compression: disabled`);
}
return {
perMessageDeflate: perMessageDeflate,
clientNoContextTakeover: true
};
}
// Create a Socket.IO server with uWebSockets.js adapter
const io = new Server({
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
pingInterval: pingInterval, // Will use it for cache invalidation
cors: {
origin: "*", // Allow connections from any origin (for development)
methods: ["GET", "POST"],
credentials: true
},
path: (prefix ? prefix : '') + '/socket',
...getCompressionConfig()
});
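// With the default prefix above, clients reach Socket.IO at ws(s)://<host>/assist/socket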
// Middleware for Socket.IO to check authorization
io.use(async (socket, next) => await authorizer.check(socket, next));
// Socket.IO connection handler
io.on('connection', (socket) => onConnect(socket));
// Attach Socket.IO to uWebSockets.js
io.attachApp(app);
io.engine.on("headers", (headers) => {
headers["x-host-id"] = process.env.HOSTNAME || "unknown";
});
setSocketIOServer(io);
// Start the server
const PORT = process.env.PORT || 3000;
app.listen(PORT, (token) => {
if (token) {
logger.info(`Server running at http://localhost:${PORT}`);
} else {
logger.error(`Failed to listen on port ${PORT}`);
}
});
startCacheRefresher(io);
// Error handling for uncaught exceptions
process.on('uncaughtException', err => {
logger.error(`Uncaught Exception: ${err}`);
});

23
scripts/helmcharts/openreplay/charts/assist-server/.helmignore Normal file

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

24
scripts/helmcharts/openreplay/charts/assist-server/Chart.yaml Normal file

@@ -0,0 +1,24 @@
apiVersion: v2
name: assist-server
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.1
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "v1.22.0"

22
scripts/helmcharts/openreplay/charts/assist-server/templates/NOTES.txt Normal file

@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "assist-server.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "assist-server.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "assist-server.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "assist-server.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

65
scripts/helmcharts/openreplay/charts/assist-server/templates/_helpers.tpl Normal file

@@ -0,0 +1,65 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "assist-server.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "assist-server.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "assist-server.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "assist-server.labels" -}}
helm.sh/chart: {{ include "assist-server.chart" . }}
{{ include "assist-server.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- if .Values.global.appLabels }}
{{- .Values.global.appLabels | toYaml | nindent 0}}
{{- end}}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "assist-server.selectorLabels" -}}
app.kubernetes.io/name: {{ include "assist-server.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "assist-server.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "assist-server.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

113
scripts/helmcharts/openreplay/charts/assist-server/templates/deployment.yaml Normal file

@@ -0,0 +1,113 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "assist-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-server.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCount }}
{{- end }}
selector:
matchLabels:
{{- include "assist-server.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "assist-server.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "assist-server.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
shareProcessNamespace: true
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
{{- if .Values.global.enterpriseEditionLicense }}
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee"
{{- else }}
image: "{{ tpl .Values.image.repository . }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
{{- end }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.healthCheck}}
{{- .Values.healthCheck | toYaml | nindent 10}}
{{- end}}
env:
# "index" is required here because these global keys contain a hyphen
- name: ASSIST_JWT_SECRET
value: {{ index .Values.global "assist-serverJWTSecret" }}
- name: ASSIST_KEY
value: {{ index .Values.global "assist-serverKey" }}
- name: AWS_DEFAULT_REGION
value: "{{ .Values.global.s3.region }}"
- name: S3_HOST
{{- if contains "minio" .Values.global.s3.endpoint }}
value: '{{ ternary "https" "http" .Values.global.ORSecureAccess}}://{{ .Values.global.domainName }}:{{ ternary .Values.global.ingress.controller.service.ports.https .Values.global.ingress.controller.service.ports.http .Values.global.ORSecureAccess }}'
{{- else}}
value: '{{ .Values.global.s3.endpoint }}'
{{- end}}
- name: S3_KEY
{{- if .Values.global.s3.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ .Values.global.s3.existingSecret }}
key: access-key
{{- else }}
value: {{ .Values.global.s3.accessKey }}
{{- end }}
- name: S3_SECRET
{{- if .Values.global.s3.existingSecret }}
valueFrom:
secretKeyRef:
name: {{ .Values.global.s3.existingSecret }}
key: secret-key
{{- else }}
value: {{ .Values.global.s3.secretKey }}
{{- end }}
- name: REDIS_URL
value: {{ .Values.global.redis.redisHost }}
{{- range $key, $val := .Values.global.env }}
- name: {{ $key }}
value: '{{ $val }}'
{{- end }}
{{- range $key, $val := .Values.env }}
- name: {{ $key }}
value: '{{ $val }}'
{{- end}}
ports:
{{- range $key, $val := .Values.service.ports }}
- name: {{ $key }}
containerPort: {{ $val }}
protocol: TCP
{{- end }}
{{- with .Values.persistence.mounts }}
volumeMounts:
{{- toYaml . | nindent 12 }}
{{- end }}
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.persistence.volumes }}
volumes:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}

33
scripts/helmcharts/openreplay/charts/assist-server/templates/hpa.yaml Normal file

@@ -0,0 +1,33 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "assist-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-server.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "assist-server.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}

56
scripts/helmcharts/openreplay/charts/assist-server/templates/ingress.yaml Normal file

@@ -0,0 +1,56 @@
{{- if .Values.ingress.enabled }}
{{- $fullName := include "assist-server.fullname" . -}}
{{- $socketioSvcPort := .Values.service.ports.socketio -}}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-server.labels" . | nindent 4 }}
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$1
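# /ws-assist-server/<rest> is rewritten to /<rest>, e.g. /ws-assist-server/assist/socket -> /assist/socket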
nginx.ingress.kubernetes.io/configuration-snippet: |
#set $sticky_used "no";
#if ($sessionid != "") {
# set $sticky_used "yes";
#}
#add_header X-Debug-Session-ID $sessionid;
#add_header X-Debug-Session-Type "wss";
#add_header X-Sticky-Session-Used $sticky_used;
#add_header X-Upstream-Server $upstream_addr;
proxy_hide_header access-control-allow-headers;
proxy_hide_header Access-Control-Allow-Origin;
add_header 'Access-Control-Allow-Origin' $http_origin always;
add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always;
add_header 'Access-Control-Allow-Headers' 'sessionid, Content-Type, Authorization' always;
add_header 'Access-Control-Max-Age' 1728000;
add_header 'Content-Type' 'text/plain; charset=UTF-8';
nginx.ingress.kubernetes.io/upstream-hash-by: $sessionid
{{- with .Values.ingress.annotations }}
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
ingressClassName: "{{ tpl .Values.ingress.className . }}"
tls:
- hosts:
- {{ .Values.global.domainName }}
{{- if .Values.ingress.tls.secretName}}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end}}
rules:
- host: {{ .Values.global.domainName }}
http:
paths:
- pathType: Prefix
backend:
service:
name: {{ $fullName }}
port:
number: {{ $socketioSvcPort }}
path: /ws-assist-server/(.*)
{{- end }}

18
scripts/helmcharts/openreplay/charts/assist-server/templates/service.yaml Normal file

@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "assist-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-server.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
{{- range $key, $val := .Values.service.ports }}
- port: {{ $val }}
targetPort: {{ $key }}
protocol: TCP
name: {{ $key }}
{{- end}}
selector:
{{- include "assist-server.selectorLabels" . | nindent 4 }}

18
scripts/helmcharts/openreplay/charts/assist-server/templates/servicemonitor.yaml Normal file

@@ -0,0 +1,18 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.serviceMonitor.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "assist-server.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-server.labels" . | nindent 4 }}
{{- if .Values.serviceMonitor.additionalLabels }}
{{- toYaml .Values.serviceMonitor.additionalLabels | nindent 4 }}
{{- end }}
spec:
endpoints:
{{- .Values.serviceMonitor.scrapeConfigs | toYaml | nindent 4 }}
selector:
matchLabels:
{{- include "assist-server.selectorLabels" . | nindent 6 }}
{{- end }}

13
scripts/helmcharts/openreplay/charts/assist-server/templates/serviceaccount.yaml Normal file

@@ -0,0 +1,13 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "assist-server.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "assist-server.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

134
scripts/helmcharts/openreplay/charts/assist-server/values.yaml Normal file

@@ -0,0 +1,134 @@
# Default values for openreplay.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: "{{ .Values.global.openReplayContainerRegistry }}/assist-server"
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: "assist-server"
fullnameOverride: "assist-server-openreplay"
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
securityContext:
runAsUser: 1001
runAsGroup: 1001
podSecurityContext:
runAsUser: 1001
runAsGroup: 1001
fsGroup: 1001
fsGroupChangePolicy: "OnRootMismatch"
# podSecurityContext: {}
# fsGroup: 2000
# securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
#service:
# type: ClusterIP
# port: 9000
serviceMonitor:
enabled: false
additionalLabels:
release: observability
scrapeConfigs:
- port: metrics
honorLabels: true
interval: 15s
path: /metrics
scheme: http
scrapeTimeout: 10s
service:
type: ClusterIP
ports:
socketio: 9001
metrics: 8888
ingress:
enabled: true
className: "{{ .Values.global.ingress.controller.ingressClassResource.name }}"
annotations:
nginx.ingress.kubernetes.io/configuration-snippet: |
add_header X-Debug-Session-ID $http_sessionid;
add_header X-Debug-Session-Type "wss";
# CORS configuration
# We don't need the upstream header
proxy_hide_header Access-Control-Allow-Origin;
add_header 'Access-Control-Allow-Origin' $http_origin always;
add_header 'Access-Control-Allow-Methods' 'GET, OPTIONS' always;
add_header 'Access-Control-Allow-Headers' 'sessionid, Content-Type, Authorization' always;
add_header 'Access-Control-Max-Age' 1728000;
add_header 'Content-Type' 'text/plain; charset=UTF-8';
nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
tls:
secretName: openreplay-ssl
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 5
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
env:
debug: 0
uws: false
redis: false
CLEAR_SOCKET_TIME: 720
nodeSelector: {}
tolerations: []
affinity: {}
persistence: {}
# # Spec of spec.template.spec.containers[*].volumeMounts
# mounts:
# - name: kafka-ssl
# mountPath: /opt/kafka/ssl
# # Spec of spec.template.spec.volumes
# volumes:
# - name: kafka-ssl
# secret:
# secretName: kafka-ssl