feat: init backend
This commit is contained in:
parent
8d5bef140f
commit
83bc0ac783
137 changed files with 32177 additions and 0 deletions
51
backend/Dockerfile
Normal file
51
backend/Dockerfile
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
# --- Build stage: compile the requested backend service ---
FROM golang:1.13-alpine AS build

# openssl-dev/pkgconf/gcc/g++ are needed for the cgo (musl) kafka build.
RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash

WORKDIR /root

# Copy module files first so dependency download is cached across source changes.
COPY go.mod .
COPY go.sum .

RUN go mod download

COPY . .

# Name of the service under services/ to compile (passed by build.sh).
ARG SERVICE_NAME

# CGO is required by confluent-kafka-go; -tags musl selects the musl libc build.
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/services/$SERVICE_NAME

# --- Runtime stage ---
FROM alpine
RUN apk add --no-cache ca-certificates

# Default runtime configuration; override per deployment as needed.
# NOTE: the original last entry ended with a stray trailing "\" which produced
# a deprecated empty continuation line before the following ARG.
ENV TZ=UTC \
    FS_ULIMIT=1000 \
    FS_DIR=/mnt/efs \
    MAXMINDDB_FILE=/root/geoip.mmdb \
    UAPARSER_FILE=/root/regexes.yaml \
    HTTP_PORT=80 \
    KAFKA_USE_SSL=true \
    REDIS_STREAMS_MAX_LEN=3000 \
    TOPIC_RAW=raw \
    TOPIC_CACHE=cache \
    TOPIC_ANALYTICS=analytics \
    TOPIC_TRIGGER=trigger \
    TOPIC_EVENTS=events \
    GROUP_SINK=sink \
    GROUP_STORAGE=storage \
    GROUP_DB=db \
    GROUP_ENDER=ender \
    GROUP_CACHE=cache \
    AWS_REGION_WEB=eu-central-1 \
    AWS_REGION_IOS=eu-west-1 \
    AWS_REGION_ASSETS=eu-central-1

# Only the http service needs the UA-parser regexes and the GeoIP database.
ARG SERVICE_NAME
RUN if [ "$SERVICE_NAME" = "http" ]; then \
        wget https://raw.githubusercontent.com/ua-parser/uap-core/master/regexes.yaml -O "$UAPARSER_FILE" && \
        wget https://static.openreplay.com/geoip/GeoLite2-Country.mmdb -O "$MAXMINDDB_FILE"; fi

COPY --from=build /root/service /root/service
# Exec form: the service runs as PID 1 and receives SIGTERM directly
# (shell form would wrap it in /bin/sh and swallow signals).
ENTRYPOINT ["/root/service"]
|
||||
40
backend/build.sh
Normal file
40
backend/build.sh
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
#!/bin/bash

# Script to build backend service images.
# Flags:
#   ee: build for enterprise edition (default is the OSS build).
#
# Example:
#   DOCKER_REPO=asayer.io bash build.sh

git_sha1=$(git rev-parse HEAD)
ee="false"

# Abort early when docker is not available.
# BUG FIX: the original ran `exit=1`, which assigns a shell variable named
# "exit" instead of terminating; the follow-up `[[ exit -eq 1 ]]` only worked
# by accident of arithmetic string evaluation. Exit directly instead.
check_prereq() {
    which docker || {
        echo "Docker not installed, please install docker."
        exit 1
    }
}

# build_api [ee] [service]
#   $1 == "ee"  -> overlay enterprise-edition sources before building
#   $2 != ""    -> build only that service; otherwise build every services/* dir
function build_api(){
    # Copy enterprise code over the OSS tree when requested.
    [[ $1 == "ee" ]] && {
        cp ../ee/backend/* ./
        ee="true"
    }
    [[ $2 != "" ]] && {
        image="$2"
        docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
        return
    }
    for image in $(ls services);
    do
        docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
        # GitHub Actions output line consumed by the CI workflow.
        echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
    done
}

check_prereq
build_api $1 $2
|
||||
21
backend/go.mod
Normal file
21
backend/go.mod
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
module openreplay/backend

go 1.13

require (
	github.com/ClickHouse/clickhouse-go v1.4.3
	github.com/aws/aws-sdk-go v1.35.23
	github.com/btcsuite/btcutil v1.0.2
	github.com/confluentinc/confluent-kafka-go v1.5.2 // indirect
	github.com/go-redis/redis v6.15.9+incompatible
	github.com/google/uuid v1.1.1
	github.com/jackc/pgconn v1.6.0
	github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451
	github.com/jackc/pgx/v4 v4.6.0
	github.com/klauspost/compress v1.11.9
	github.com/klauspost/pgzip v1.2.5
	github.com/oschwald/maxminddb-golang v1.7.0
	github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
	github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
	gopkg.in/confluentinc/confluent-kafka-go.v1 v1.5.2
)
|
||||
161
backend/go.sum
Normal file
161
backend/go.sum
Normal file
|
|
@ -0,0 +1,161 @@
|
|||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
|
||||
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
|
||||
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
|
||||
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/confluentinc/confluent-kafka-go v1.4.2 h1:13EK9RTujF7lVkvHQ5Hbu6bM+Yfrq8L0MkJNnjHSd4Q=
|
||||
github.com/confluentinc/confluent-kafka-go v1.4.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
|
||||
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
|
||||
github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
|
||||
github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
|
||||
github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
|
||||
github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
|
||||
github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI=
|
||||
github.com/jackc/pgconn v1.6.0 h1:8FiBxMxS/Z0eQ9BeE1HhL6pzPL1R5x+ZuQ+T86WgZ4I=
|
||||
github.com/jackc/pgconn v1.6.0/go.mod h1:yeseQo4xhQbgyJs2c87RAXOH2i624N0Fh1KSPJya7qo=
|
||||
github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
|
||||
github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
|
||||
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
|
||||
github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgproto3/v2 v2.0.2 h1:q1Hsy66zh4vuNsajBUF2PNqfAMMfxU5mk594lPE9vjY=
|
||||
github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 h1:Q3tB+ExeflWUW7AFcAhXqk40s9mnNYLk1nOkKNZ5GnU=
|
||||
github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
|
||||
github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
|
||||
github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
|
||||
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
|
||||
github.com/jackc/pgtype v1.3.0 h1:l8JvKrby3RI7Kg3bYEeU9TA4vqC38QDpFCfcrC7KuN0=
|
||||
github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
|
||||
github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
|
||||
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
|
||||
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
|
||||
github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
|
||||
github.com/jackc/pgx/v4 v4.6.0 h1:Fh0O9GdlG4gYpjpwOqjdEodJUQM9jzN3Hdv7PN0xmm0=
|
||||
github.com/jackc/pgx/v4 v4.6.0/go.mod h1:vPh43ZzxijXUVJ+t/EmXBtFmbFVO72cuneCT9oAlxAg=
|
||||
github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
|
||||
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
|
||||
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/oschwald/maxminddb-golang v1.7.0 h1:JmU4Q1WBv5Q+2KZy5xJI+98aUwTIrPPxZUkd5Cwr8Zc=
|
||||
github.com/oschwald/maxminddb-golang v1.7.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc=
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
|
||||
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe h1:aj/vX5epIlQQBEocKoM9nSAiNpakdQzElc8SaRFPu+I=
|
||||
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe/go.mod h1:OBcG9bn7sHtXgarhUEb3OfCnNsgtGnkVf41ilSZ3K3E=
|
||||
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59 h1:3zb4D3T4G8jdExgVU/95+vQXfpEPiMdCaZgmGVxjNHM=
|
||||
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76 h1:Dho5nD6R3PcW2SH1or8vS0dszDaXRxIw55lBX7XiE5g=
|
||||
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.4.2 h1:JabkIV98VYFqYKHHzXtgGMFuRgFBNTNzBytbGByzrJI=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.4.2/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
30
backend/pkg/db/cache/messages_common.go
vendored
Normal file
30
backend/pkg/db/cache/messages_common.go
vendored
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
// . "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
func (c *PGCache) insertSessionEnd(sessionID uint64, timestamp uint64 ) error {
|
||||
//duration, err := c.Conn.InsertSessionEnd(sessionID, timestamp)
|
||||
_, err := c.Conn.InsertSessionEnd(sessionID, timestamp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.DeleteSession(sessionID)
|
||||
// session, err := c.GetSession(sessionID)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// session.Duration = &duration
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Conn.InsertIssueEvent(sessionID, session.ProjectID, crash)
|
||||
}
|
||||
140
backend/pkg/db/cache/messages_ios.go
vendored
Normal file
140
backend/pkg/db/cache/messages_ios.go
vendored
Normal file
|
|
@ -0,0 +1,140 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
func (c *PGCache) InsertIOSSessionStart(sessionID uint64, s *IOSSessionStart) error {
|
||||
if c.sessions[ sessionID ] != nil {
|
||||
return errors.New("This session already in cache!")
|
||||
}
|
||||
c.sessions[ sessionID ] = &Session{
|
||||
SessionID: sessionID,
|
||||
Platform: "ios",
|
||||
Timestamp: s.Timestamp,
|
||||
ProjectID: uint32(s.ProjectID),
|
||||
TrackerVersion: s.TrackerVersion,
|
||||
RevID: s.RevID,
|
||||
UserUUID: s.UserUUID,
|
||||
UserOS: s.UserOS,
|
||||
UserOSVersion: s.UserOSVersion,
|
||||
UserDevice: s.UserDevice,
|
||||
UserCountry: s.UserCountry,
|
||||
}
|
||||
if err := c.Conn.InsertSessionStart(sessionID, c.sessions[ sessionID ]); err != nil {
|
||||
c.sessions[ sessionID ] = nil
|
||||
return err
|
||||
}
|
||||
return nil;
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertIOSSessionEnd(sessionID uint64, e *IOSSessionEnd) error {
|
||||
return c.insertSessionEnd(sessionID, e.Timestamp)
|
||||
}
|
||||
|
||||
|
||||
func (c *PGCache) InsertIOSScreenEnter(sessionID uint64, screenEnter *IOSScreenEnter) error {
|
||||
if err := c.Conn.InsertIOSScreenEnter(sessionID, screenEnter); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
session.PagesCount += 1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertIOSClickEvent(sessionID uint64, clickEvent *IOSClickEvent) error {
|
||||
if err := c.Conn.InsertIOSClickEvent(sessionID, clickEvent); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
session.EventsCount += 1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertIOSInputEvent(sessionID uint64, inputEvent *IOSInputEvent) error {
|
||||
if err := c.Conn.InsertIOSInputEvent(sessionID, inputEvent); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
session.EventsCount += 1
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertIOSCrash(sessionID uint64, crash *IOSCrash) error {
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.Conn.InsertIOSCrash(sessionID, session.ProjectID, crash); err != nil {
|
||||
return err
|
||||
}
|
||||
session.ErrorsCount += 1
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertIOSIssueEvent is currently a no-op: iOS issue events are not yet
// persisted. The intended implementation is kept below, pending unification
// of the IssueEvent message across platforms.
func (c *PGCache) InsertIOSIssueEvent(sessionID uint64, issueEvent *IOSIssueEvent) error {
	// session, err := c.GetSession(sessionID)
	// if err != nil {
	// return err
	// }
	// TODO: unite IssueEvent message for the all platforms
	// if err := c.Conn.InsertIssueEvent(sessionID, session.ProjectID, issueEvent); err != nil {
	// return err
	// }
	return nil
}
|
||||
|
||||
func (c *PGCache) InsertUserID(sessionID uint64, userID *IOSUserID) error {
|
||||
if err := c.Conn.InsertIOSUserID(sessionID, userID); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
session.UserID = &userID.Value
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertUserAnonymousID(sessionID uint64, userAnonymousID *IOSUserAnonymousID) error {
|
||||
if err := c.Conn.InsertIOSUserAnonymousID(sessionID, userAnonymousID); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
session.UserAnonymousID = &userAnonymousID.Value
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertMetadata(sessionID uint64, metadata *Metadata) error {
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
project, err := c.GetProject(session.ProjectID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
keyNo := project.GetMetadataNo(metadata.Key)
|
||||
if err := c.Conn.InsertMetadata(sessionID, keyNo, metadata.Value); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
session.SetMetadata(keyNo, metadata.Value)
|
||||
return nil
|
||||
}
|
||||
|
||||
56
backend/pkg/db/cache/messages_web.go
vendored
Normal file
56
backend/pkg/db/cache/messages_web.go
vendored
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
|
||||
func (c *PGCache) InsertWebSessionStart(sessionID uint64, s *SessionStart) error {
|
||||
if c.sessions[ sessionID ] != nil {
|
||||
return errors.New("This session already in cache!")
|
||||
}
|
||||
c.sessions[ sessionID ] = &Session{
|
||||
SessionID: sessionID,
|
||||
Platform: "web",
|
||||
Timestamp: s.Timestamp,
|
||||
ProjectID: uint32(s.ProjectID),
|
||||
TrackerVersion: s.TrackerVersion,
|
||||
RevID: s.RevID,
|
||||
UserUUID: s.UserUUID,
|
||||
UserOS: s.UserOS,
|
||||
UserOSVersion: s.UserOSVersion,
|
||||
UserDevice: s.UserDevice,
|
||||
UserCountry: s.UserCountry,
|
||||
// web properties (TODO: unite different platform types)
|
||||
UserAgent: s.UserAgent,
|
||||
UserBrowser: s.UserBrowser,
|
||||
UserBrowserVersion: s.UserBrowserVersion,
|
||||
UserDeviceType: s.UserDeviceType,
|
||||
UserDeviceMemorySize: s.UserDeviceMemorySize,
|
||||
UserDeviceHeapSize: s.UserDeviceHeapSize,
|
||||
}
|
||||
if err := c.Conn.InsertSessionStart(sessionID, c.sessions[ sessionID ]); err != nil {
|
||||
c.sessions[ sessionID ] = nil
|
||||
return err
|
||||
}
|
||||
return nil;
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertWebSessionEnd(sessionID uint64, e *SessionEnd) error {
|
||||
return c.insertSessionEnd(sessionID, e.Timestamp)
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertWebErrorEvent(sessionID uint64, e *ErrorEvent) error {
|
||||
session, err := c.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := c.Conn.InsertWebErrorEvent(sessionID, session.ProjectID, e); err != nil {
|
||||
return err
|
||||
}
|
||||
session.ErrorsCount += 1
|
||||
return nil
|
||||
}
|
||||
|
||||
40
backend/pkg/db/cache/pg_cache.go
vendored
Normal file
40
backend/pkg/db/cache/pg_cache.go
vendored
Normal file
|
|
@ -0,0 +1,40 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
// ProjectMeta pairs a cached Project with the instant at which the cache
// entry expires and must be refetched from Postgres.
type ProjectMeta struct {
	*Project
	expirationTime time.Time // entries are considered stale once time.Now() passes this
}
|
||||
|
||||
// !TODO: remove old sessions by timeout to avoid memleaks

/*
 * Cache layer around the stateless PG adapter
 **/
// PGCache embeds *postgres.Conn, so any method not overridden here falls
// through to the raw DB adapter. Not safe for concurrent use: the maps are
// accessed without synchronization.
type PGCache struct {
	*postgres.Conn
	sessions map[uint64]*Session // nil value = session known to be absent (negative cache; see GetSession)
	projects map[uint32]*ProjectMeta // keyed by project ID; shares entries with projectsByKeys
	projectsByKeys map[string]*ProjectMeta // keyed by project key; shares entries with projects
	projectExpirationTimeout time.Duration // TTL applied to project cache entries
}
|
||||
|
||||
// TODO: create conn automatically
|
||||
func NewPGCache(pgConn *postgres.Conn, projectExpirationTimeoutMs int64) *PGCache {
|
||||
return &PGCache{
|
||||
Conn: pgConn,
|
||||
sessions: make(map[uint64]*Session),
|
||||
projects: make(map[uint32]*ProjectMeta),
|
||||
projectsByKeys: make(map[string]*ProjectMeta),
|
||||
projectExpirationTimeout: time.Duration(1000 * projectExpirationTimeoutMs),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
37
backend/pkg/db/cache/project.go
vendored
Normal file
37
backend/pkg/db/cache/project.go
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
func (c *PGCache) GetProjectByKey(projectKey string) (*Project, error) {
|
||||
if c.projectsByKeys[ projectKey ] != nil &&
|
||||
time.Now().Before(c.projectsByKeys[ projectKey ].expirationTime) {
|
||||
return c.projectsByKeys[ projectKey ].Project, nil
|
||||
}
|
||||
p, err := c.Conn.GetProjectByKey(projectKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.projectsByKeys[ projectKey ] = &ProjectMeta{ p, time.Now().Add(c.projectExpirationTimeout) }
|
||||
c.projects[ p.ProjectID ] = c.projectsByKeys[ projectKey ]
|
||||
return p, nil
|
||||
}
|
||||
|
||||
|
||||
|
||||
func (c *PGCache) GetProject(projectID uint32) (*Project, error) {
|
||||
if c.projects[ projectID ] != nil &&
|
||||
time.Now().Before(c.projects[ projectID ].expirationTime) {
|
||||
return c.projects[ projectID ].Project, nil
|
||||
}
|
||||
p, err := c.Conn.GetProject(projectID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.projects[ projectID ] = &ProjectMeta{ p, time.Now().Add(c.projectExpirationTimeout) }
|
||||
c.projectsByKeys[ p.ProjectKey ] = c.projects[ projectID ]
|
||||
return p, nil
|
||||
}
|
||||
|
||||
30
backend/pkg/db/cache/session.go
vendored
Normal file
30
backend/pkg/db/cache/session.go
vendored
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"github.com/jackc/pgx/v4"
|
||||
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
func (c *PGCache) GetSession(sessionID uint64) (*Session, error) {
|
||||
if s, inCache := c.sessions[ sessionID ]; inCache {
|
||||
// TODO: review. Might cause bugs in case of multiple instances
|
||||
if s == nil {
|
||||
return nil, pgx.ErrNoRows
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
s, err := c.Conn.GetSession(sessionID)
|
||||
if err == pgx.ErrNoRows {
|
||||
c.sessions[ sessionID ] = nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.sessions[ sessionID ] = s
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// DeleteSession evicts the session (including any negative-cache marker)
// from the in-memory cache. The database row is not touched.
func (c *PGCache) DeleteSession(sessionID uint64) {
	delete(c.sessions, sessionID)
}
|
||||
240
backend/pkg/db/postgres/alert.go
Normal file
240
backend/pkg/db/postgres/alert.go
Normal file
|
|
@ -0,0 +1,240 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
sq "github.com/Masterminds/squirrel"
|
||||
"log"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TimeString sql.NullString
|
||||
type query struct {
|
||||
Left string `db:"query.left" json:"left"`
|
||||
Operator string `db:"query.operator" json:"operator"`
|
||||
Right float64 `db:"query.right" json:"right"`
|
||||
}
|
||||
type options struct {
|
||||
RenotifyInterval int64 `db:"options.renotifyInterval" json:"renotifyInterval"`
|
||||
LastNotification int64 `db:"options.lastNotification" json:"lastNotification;omitempty"`
|
||||
CurrentPeriod int64 `db:"options.currentPeriod" json:"currentPeriod"`
|
||||
PreviousPeriod int64 `db:"options.previousPeriod" json:"previousPeriod;omitempty"`
|
||||
Message []map[string]string `db:"options.message" json:"message;omitempty"`
|
||||
Change string `db:"options.change" json:"change;omitempty"`
|
||||
}
|
||||
type Alert struct {
|
||||
AlertID uint32 `db:"alert_id" json:"alert_id"`
|
||||
ProjectID uint32 `db:"project_id" json:"project_id"`
|
||||
Name string `db:"name" json:"name"`
|
||||
Description sql.NullString `db:"description" json:"description"`
|
||||
Active bool `db:"active" json:"active"`
|
||||
DetectionMethod string `db:"detection_method" json:"detection_method"`
|
||||
Query query `db:"query" json:"query"`
|
||||
DeletedAt *int64 `db:"deleted_at" json:"deleted_at"`
|
||||
CreatedAt *int64 `db:"created_at" json:"created_at"`
|
||||
Options options `db:"options" json:"options"`
|
||||
TenantId uint32 `db:"tenant_id" json:"tenant_id"`
|
||||
}
|
||||
|
||||
// IterateAlerts streams every active, non-deleted alert (joined with its
// project's tenant) to iter. Row-scan failures are reported as
// iter(nil, err) and iteration continues; a query or cursor failure aborts
// the scan and is returned.
func (pg *Conn) IterateAlerts(iter func(alert *Alert, err error)) error {
	rows, err := pg.query(`
		SELECT
			alerts.alert_id,
			alerts.project_id,
			alerts.name,
			alerts.description,
			alerts.active,
			alerts.detection_method,
			alerts.query,
			CAST(EXTRACT(epoch FROM alerts.deleted_at) * 1000 AS BIGINT) AS deleted_at,
			CAST(EXTRACT(epoch FROM alerts.created_at) * 1000 AS BIGINT) AS created_at,
			alerts.options,
			projects.tenant_id
		FROM public.alerts INNER JOIN public.projects USING(project_id)
		WHERE alerts.active AND alerts.deleted_at ISNULL;
	`)
	if err != nil {
		return err
	}
	defer rows.Close()
	for rows.Next() {
		a := new(Alert)
		if err = rows.Scan(
			&a.AlertID,
			&a.ProjectID,
			&a.Name,
			&a.Description,
			&a.Active,
			&a.DetectionMethod,
			&a.Query, // jsonb column — presumably decoded into the struct by pgx; confirm
			&a.DeletedAt,
			&a.CreatedAt,
			&a.Options, // jsonb column — presumably decoded into the struct by pgx; confirm
			&a.TenantId,
		); err != nil {
			iter(nil, err)
			continue
		}
		iter(a, nil)
	}

	if err = rows.Err(); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
func (pg *Conn) SaveLastNotification(allIds []uint32) error {
|
||||
var paramrefs string
|
||||
for _, v := range allIds {
|
||||
paramrefs += strconv.Itoa(int(v)) + `,`
|
||||
}
|
||||
paramrefs = paramrefs[:len(paramrefs)-1] // remove last ","
|
||||
q := "UPDATE public.Alerts SET options = options||'{\"lastNotification\":" + strconv.Itoa(int(time.Now().Unix()*1000)) + "}'::jsonb WHERE alert_id IN (" + paramrefs + ");"
|
||||
//log.Println(q)
|
||||
log.Println("Updating PG")
|
||||
return pg.exec(q)
|
||||
}
|
||||
|
||||
// columnDefinition maps an alert metric ("query.left") onto the SQL pieces
// needed to compute it.
type columnDefinition struct {
	table     string // source table
	formula   string // aggregate expression producing the metric value
	condition string // extra WHERE filter; empty means none
	group     string // GROUP BY column (exposed as group_value); empty means ungrouped
}

// LeftToDb translates an alert's metric name into its column definition.
// NOTE(review): several conditions use '==' and intDiv(), which is not
// Postgres syntax (looks like ClickHouse) — confirm which engine executes
// the query Build() produces.
// NOTE(review): "performance.time_to_render.averag" is missing the final
// 'e'; existing alerts may already reference the typo'd key, so renaming
// it would need a data migration.
var LeftToDb = map[string]columnDefinition{
	"performance.dom_content_loaded.average":     {table: "pages", formula: "COALESCE(AVG(NULLIF(dom_content_loaded_event_start ,0)),0)"},
	"performance.first_meaningful_paint.average": {table: "pages", formula: "COALESCE(AVG(NULLIF(first_contentful_paint,0)),0)"},
	"performance.page_load_time.average":         {table: "pages", formula: "AVG(NULLIF(load_event_end ,0))"},
	"performance.dom_build_time.average":         {table: "pages", formula: "AVG(NULLIF(dom_building_time,0))"},
	"performance.speed_index.average":            {table: "pages", formula: "AVG(NULLIF(speed_index,0))"},
	//"avgSpeedIndexByLocation":                  {table: "pages", formula: "AVG(NULLIF(speed_index,0))", group: "user_country"},
	"performance.page_response_time.average": {table: "pages", formula: "AVG(NULLIF(response_time,0))"},
	"performance.ttfb.average":               {table: "pages", formula: "AVG(NULLIF(first_paint,0))"},
	//"avgDomContentLoaded":                   {table: "pages", formula: "AVG(NULLIF(dom_content_loaded_event_time,0))"},
	"performance.time_to_render.averag":          {table: "pages", formula: "AVG(NULLIF(visually_complete,0))"},
	"performance.image_load_time.average":        {table: "resources", formula: "AVG(NULLIF(duration,0))", condition: "type=='img'"},
	"performance.request_load_time.average":      {table: "resources", formula: "AVG(NULLIF(duration,0))", condition: "type=='fetch'"},
	"resources.load_time.average":                {table: "resources", formula: "AVG(NULLIF(duration,0))"},
	"resources.missing.count":                    {table: "resources", formula: "COUNT(DISTINCT url_hostpath)", condition: "success==0"},
	"errors.4xx_5xx.count":                       {table: "resources", formula: "COUNT(session_id)", condition: "intDiv(status, 100)!=2"},
	"errors.4xx.count":                           {table: "resources", formula: "COUNT(session_id)", condition: "intDiv(status, 100)==4"},
	"errors.5xx.count":                           {table: "resources", formula: "COUNT(session_id)", condition: "intDiv(status, 100)==5"},
	"errors.javascript.impacted_sessions.count":  {table: "resources", formula: "COUNT(DISTINCT session_id)", condition: "success=0 AND type='script'"},
	"performance.crashes.count":                  {table: "sessions", formula: "COUNT(DISTINCT session_id)", condition: "errors_count > 0"},
	"errors.javascript.count":                    {table: "errors", formula: "COUNT(DISTINCT session_id)", condition: "source=='js_exception'"},
	"errors.backend.count":                       {table: "errors", formula: "COUNT(DISTINCT session_id)", condition: "source!='js_exception'"},
}
|
||||
|
||||
// TimeInterval maps an alert period (minutes) to the evaluation cadence
// (minutes) for that period. This is the frequency of execution for each
// threshold; CanCheck uses it to decide whether the alert is due.
var TimeInterval = map[int64]int64{
	15:   3,
	30:   5,
	60:   10,
	120:  20,
	240:  30,
	1440: 60,
}
|
||||
|
||||
func (a *Alert) CanCheck() bool {
|
||||
now := time.Now().Unix() * 1000
|
||||
var repetitionBase int64
|
||||
|
||||
if repetitionBase = a.Options.CurrentPeriod; a.DetectionMethod == "change" && a.Options.CurrentPeriod > a.Options.PreviousPeriod {
|
||||
repetitionBase = a.Options.PreviousPeriod
|
||||
}
|
||||
|
||||
if _, ok := TimeInterval[repetitionBase]; !ok {
|
||||
log.Printf("repetitionBase: %d NOT FOUND", repetitionBase)
|
||||
return false
|
||||
}
|
||||
//for i := int64(0); i <= 10; i++ {
|
||||
// now += 60 * 1000
|
||||
// log.Printf("%s: ((now-*a.CreatedAt)%%TimeInterval[repetitionBase]*60*1000) < 60*1000: %t", a.Name, ((now-*a.CreatedAt)%(TimeInterval[repetitionBase]*60*1000)) < 60*1000)
|
||||
// log.Printf("now: %d", now)
|
||||
// log.Printf("*a.CreatedAt: %d", *a.CreatedAt)
|
||||
// log.Printf("now-*a.CreatedAt: %d", now-*a.CreatedAt)
|
||||
// log.Printf("(now-*a.CreatedAt)%%TimeInterval[repetitionBase]*60*1000: %d", (now-*a.CreatedAt)%TimeInterval[repetitionBase]*60*1000)
|
||||
//}
|
||||
//return false
|
||||
//log.Printf("%s: a.Options.RenotifyInterval<=0: %t", a.Name, a.Options.RenotifyInterval <= 0)
|
||||
//log.Printf("%s: a.Options.LastNotification <= 0: %t", a.Name, a.Options.LastNotification <= 0)
|
||||
//log.Printf("%s: (now-a.Options.LastNotification) > a.Options.RenotifyInterval*60*1000: %t", a.Name, (now-a.Options.LastNotification) > a.Options.RenotifyInterval*60*1000)
|
||||
//log.Printf("%s: ((now-*a.CreatedAt)%%TimeInterval[repetitionBase]*60*1000) < 60*1000: %t", a.Name, ((now-*a.CreatedAt)%TimeInterval[repetitionBase]*60*1000) < 60*1000)
|
||||
//log.Printf("%s: TimeInterval[repetitionBase]: %d", a.Name, TimeInterval[repetitionBase])
|
||||
return a.DeletedAt == nil && a.Active &&
|
||||
(a.Options.RenotifyInterval <= 0 ||
|
||||
a.Options.LastNotification <= 0 ||
|
||||
((now - a.Options.LastNotification) > a.Options.RenotifyInterval*60*1000)) &&
|
||||
((now-*a.CreatedAt)%(TimeInterval[repetitionBase]*60*1000)) < 60*1000
|
||||
}
|
||||
|
||||
// Build assembles the SELECT that evaluates this alert and returns it as a
// squirrel builder. The resulting query exposes "value" (the metric) and
// "valid" (whether value <operator> right holds), plus "group_value" for
// grouped metrics.
//
// "threshold" evaluates the metric over the last CurrentPeriod minutes.
// "change" compares the current window with the preceding one, either as
// an absolute difference (Options.Change == "change", previous window also
// CurrentPeriod long) or as a percentage (Options.Change == "percent",
// previous window PreviousPeriod long).
// NOTE(review): toDateTime() and the '==' conditions in LeftToDb are not
// Postgres syntax (looks like ClickHouse) — confirm the target engine.
func (a *Alert) Build() (sq.SelectBuilder, error) {
	colDef := LeftToDb[a.Query.Left]
	// Inner query: aggregate the metric for this project, with the metric's
	// own row filter.
	subQ := sq.
		Select(colDef.formula + " AS value").
		From(colDef.table).
		Where(sq.And{sq.Eq{"project_id": a.ProjectID},
			sq.Expr(colDef.condition)})
	// Outer query: expose the value plus the pass/fail flag.
	q := sq.Select(fmt.Sprint("value, coalesce(value,0)", a.Query.Operator, a.Query.Right, " AS valid"))
	if len(colDef.group) > 0 {
		subQ = subQ.Column(colDef.group + " AS group_value")
		subQ = subQ.GroupBy(colDef.group)
		q = q.Column("group_value")
	}

	if a.DetectionMethod == "threshold" {
		// Single window: last CurrentPeriod minutes.
		q = q.FromSelect(subQ.Where(sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60)), "stat")
	} else if a.DetectionMethod == "change" {
		if a.Options.Change == "change" {
			if len(colDef.group) == 0 {
				// Ungrouped: value = current window minus previous window.
				sub1, args1, _ := subQ.Where(sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
				sub2, args2, _ := subQ.Where(
					sq.And{
						sq.Expr("datetime<toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60),
						sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
					}).ToSql()
				sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")-(" + sub2 + ")) AS value").ToSql()
				q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
			} else {
				// Grouped: join current and previous windows on group_value.
				subq1 := subQ.Where(sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60))
				sub2, args2, _ := subQ.Where(
					sq.And{
						sq.Expr("datetime<toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60),
						sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
					}).ToSql()
				sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
				q = q.FromSelect(sub1, "stat")
			}
		} else if a.Options.Change == "percent" {
			if len(colDef.group) == 0 {
				// Ungrouped: value = percent change vs a PreviousPeriod-long
				// window immediately before the current one.
				sub1, args1, _ := subQ.Where(sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
				sub2, args2, _ := subQ.Where(
					sq.And{
						sq.Expr("datetime<toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60),
						sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
					}).ToSql()
				sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")/(" + sub2 + ")-1)*100 AS value").ToSql()
				q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
			} else {
				// Grouped percent change, joined on group_value.
				subq1 := subQ.Where(sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60))
				sub2, args2, _ := subQ.Where(
					sq.And{
						sq.Expr("datetime<toDateTime(?)", time.Now().Unix()-a.Options.CurrentPeriod*60),
						sq.Expr("datetime>=toDateTime(?)", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
					}).ToSql()
				sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
				q = q.FromSelect(sub1, "stat")
			}
		} else {
			return q, errors.New("unsupported change method")
		}

	} else {
		return q, errors.New("unsupported detection method")
	}
	return q, nil
}
|
||||
63
backend/pkg/db/postgres/connector.go
Normal file
63
backend/pkg/db/postgres/connector.go
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/jackc/pgx/v4/pgxpool"
|
||||
)
|
||||
|
||||
// Conn is a thin wrapper around a pgx connection pool exposing the small
// query/exec surface used by the rest of this package.
type Conn struct {
	c *pgxpool.Pool // TODO: conditional usage of Pool/Conn (use interface?)
}

// NewConn connects to Postgres at url.
// NOTE(review): a connection failure terminates the process via
// log.Fatalln — confirm this fail-fast behavior is intended for every
// service using this package.
func NewConn(url string) *Conn {
	c, err := pgxpool.Connect(context.Background(), url)
	if err != nil {
		log.Fatalln(err)
	}
	return &Conn{c}
}

// Close releases the pool. The error return exists only to satisfy
// Closer-style callers; pool shutdown itself reports nothing.
func (conn *Conn) Close() error {
	conn.c.Close()
	return nil
}
|
||||
|
||||
func (conn *Conn) query(sql string, args ...interface{}) (pgx.Rows, error) {
|
||||
return conn.c.Query(context.Background(), sql, args...)
|
||||
}
|
||||
|
||||
func (conn *Conn) queryRow(sql string, args ...interface{}) pgx.Row {
|
||||
return conn.c.QueryRow(context.Background(), sql, args...)
|
||||
}
|
||||
|
||||
func (conn *Conn) exec(sql string, args ...interface{}) error {
|
||||
_, err := conn.c.Exec(context.Background(), sql, args...)
|
||||
return err
|
||||
}
|
||||
|
||||
type _Tx struct {
|
||||
pgx.Tx
|
||||
}
|
||||
|
||||
func (conn *Conn) begin() (_Tx, error) {
|
||||
tx, err := conn.c.Begin(context.Background())
|
||||
return _Tx{tx}, err
|
||||
}
|
||||
|
||||
func (tx _Tx) exec(sql string, args ...interface{}) error {
|
||||
_, err := tx.Exec(context.Background(), sql, args...)
|
||||
return err;
|
||||
}
|
||||
|
||||
func (tx _Tx) rollback() error {
|
||||
return tx.Rollback(context.Background())
|
||||
}
|
||||
|
||||
func (tx _Tx) commit() error {
|
||||
return tx.Commit(context.Background())
|
||||
}
|
||||
|
||||
|
||||
16
backend/pkg/db/postgres/errors.go
Normal file
16
backend/pkg/db/postgres/errors.go
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/jackc/pgconn"
|
||||
"github.com/jackc/pgerrcode"
|
||||
)
|
||||
|
||||
func IsPkeyViolation(err error) bool {
|
||||
var pgErr *pgconn.PgError
|
||||
if errors.As(err, &pgErr) && pgErr.Code == pgerrcode.UniqueViolation {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
35
backend/pkg/db/postgres/helpers.go
Normal file
35
backend/pkg/db/postgres/helpers.go
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func getIssueScore(issueEvent *messages.IssueEvent) int {
|
||||
switch issueEvent.Type {
|
||||
case "crash", "dead_click", "memory", "cpu":
|
||||
return 1000
|
||||
case "bad_request", "excessive_scrolling", "click_rage", "missing_resource" :
|
||||
return 500
|
||||
case "slow_resource", "slow_page_load":
|
||||
return 100
|
||||
default:
|
||||
return 100
|
||||
}
|
||||
}
|
||||
|
||||
func calcDomBuildingTime(pe *messages.PageEvent) uint64 {
|
||||
if pe == nil {
|
||||
return 0
|
||||
}
|
||||
if pe.DomContentLoadedEventStart < pe.ResponseEnd {
|
||||
return 0
|
||||
}
|
||||
return pe.DomContentLoadedEventStart - pe.ResponseEnd
|
||||
}
|
||||
|
||||
func calcResponseTime(pe *messages.PageEvent) uint64 {
|
||||
if pe.ResponseStart <= pe.ResponseEnd {
|
||||
return pe.ResponseEnd - pe.ResponseStart
|
||||
}
|
||||
return 0
|
||||
}
|
||||
49
backend/pkg/db/postgres/integration.go
Normal file
49
backend/pkg/db/postgres/integration.go
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
//go:generate $GOPATH/bin/easytags $GOFILE json
|
||||
|
||||
// Integration is one third-party integration configured for a project.
// It is populated both from SELECTs and from JSON NOTIFY payloads.
type Integration struct {
	ProjectID uint32 `json:"project_id"`
	Provider  string `json:"provider"`
	//DeletedAt *int64 `json:"deleted_at"`
	RequestData json.RawMessage `json:"request_data"` // provider-specific fetch state; opaque at this layer
	Options     json.RawMessage `json:"options"`      // provider-specific settings/credentials; opaque at this layer
}
|
||||
|
||||
func (pg *Conn) IterateIntegrationsOrdered(iter func(integration *Integration, err error)) error {
|
||||
rows, err := pg.query(`
|
||||
SELECT project_id, provider, options, request_data
|
||||
FROM integrations
|
||||
`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer rows.Close()
|
||||
|
||||
for rows.Next() {
|
||||
i := new(Integration)
|
||||
if err := rows.Scan(&i.ProjectID, &i.Provider, &i.Options, &i.RequestData); err != nil {
|
||||
iter(nil, err)
|
||||
continue
|
||||
}
|
||||
iter(i, nil)
|
||||
}
|
||||
|
||||
if err = rows.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateIntegrationRequestData persists the integration's mutated
// request_data (e.g. fetch cursors) back to the database.
func (pg *Conn) UpdateIntegrationRequestData(i *Integration) error {
	return pg.exec(`
		UPDATE integrations
		SET request_data = $1
		WHERE project_id=$2 AND provider=$3`,
		i.RequestData, i.ProjectID, i.Provider,
	)
}
|
||||
82
backend/pkg/db/postgres/listener.go
Normal file
82
backend/pkg/db/postgres/listener.go
Normal file
|
|
@ -0,0 +1,82 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/jackc/pgx/v4"
|
||||
|
||||
|
||||
)
|
||||
|
||||
// Listener subscribes to a Postgres NOTIFY channel and converts payloads
// into typed values. Only the channel matching the constructor used is
// non-nil (Integrations for NewIntegrationsListener, Alerts for
// NewAlertsListener); Errors carries connection and decode failures.
type Listener struct {
	conn         *pgx.Conn
	Integrations chan *Integration
	Alerts       chan *Alert
	Errors       chan error
}
|
||||
|
||||
func NewIntegrationsListener(url string) (*Listener, error) {
|
||||
conn, err := pgx.Connect(context.Background(), url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener := &Listener{
|
||||
conn: conn,
|
||||
Errors: make(chan error),
|
||||
}
|
||||
listener.Integrations = make(chan *Integration, 50)
|
||||
if _, err := conn.Exec(context.Background(), "LISTEN integration"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go listener.listen()
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
func NewAlertsListener(url string) (*Listener, error) {
|
||||
conn, err := pgx.Connect(context.Background(), url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
listener := &Listener{
|
||||
conn: conn,
|
||||
Errors: make(chan error),
|
||||
}
|
||||
listener.Alerts = make(chan *Alert, 50)
|
||||
if _, err := conn.Exec(context.Background(), "LISTEN alert"); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
go listener.listen()
|
||||
return listener, nil
|
||||
}
|
||||
|
||||
// listen runs for the lifetime of the Listener, decoding NOTIFY payloads
// into typed values and dispatching them to the matching channel.
// Notifications on unknown channels are silently dropped.
func (listener *Listener) listen() {
	for {
		notification, err := listener.conn.WaitForNotification(context.Background())
		if err != nil {
			// NOTE(review): a permanently broken connection turns this into a
			// tight error loop, and sends block until someone drains Errors —
			// confirm every consumer reads the Errors channel.
			listener.Errors <- err
			continue
		}
		switch notification.Channel {
		case "integration":
			integrationP := new(Integration)
			if err := json.Unmarshal([]byte(notification.Payload), integrationP); err != nil {
				listener.Errors <- fmt.Errorf("%v | Payload: %v", err, notification.Payload)
			} else {
				listener.Integrations <- integrationP
			}
		case "alert":
			alertP := new(Alert)
			if err := json.Unmarshal([]byte(notification.Payload), alertP); err != nil {
				listener.Errors <- fmt.Errorf("%v | Payload: %v", err, notification.Payload)
			} else {
				listener.Alerts <- alertP
			}
		}
	}
}
|
||||
|
||||
// Close terminates the underlying Postgres connection.
// NOTE(review): the listen goroutine keeps running and will start pushing
// errors into Errors once the connection is gone — confirm shutdown order.
func (listener *Listener) Close() error {
	return listener.conn.Close(context.Background())
}
|
||||
224
backend/pkg/db/postgres/messages_common.go
Normal file
224
backend/pkg/db/postgres/messages_common.go
Normal file
|
|
@ -0,0 +1,224 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
"fmt"
|
||||
|
||||
"openreplay/backend/pkg/hashid"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
// getAutocompleteType returns the autocomplete type for a platform:
// the base type for "web", otherwise the base type suffixed with the
// upper-cased platform (e.g. "USEROS_IOS").
func getAutocompleteType(baseType string, platform string) string {
	if platform != "web" {
		baseType = baseType + "_" + strings.ToUpper(platform)
	}
	return baseType
}
|
||||
|
||||
// insertAutocompleteValue records value under the given autocomplete type
// for the session's project, ignoring duplicates. Empty values are skipped.
// Failures are only logged: autocomplete is best-effort and must never
// fail the caller's insert.
func (conn *Conn) insertAutocompleteValue(sessionID uint64, tp string, value string) {
	if len(value) == 0 {
		return
	}
	if err := conn.exec(`
		INSERT INTO autocomplete (
			value,
			type,
			project_id
		) (SELECT
			$1, $2, project_id
			FROM sessions
			WHERE session_id = $3
		) ON CONFLICT DO NOTHING`,
		value, tp, sessionID,
	); err != nil {
		log.Printf("Insert autocomplete error: %v", err)
	}
}
|
||||
|
||||
func (conn *Conn) InsertSessionStart(sessionID uint64, s *types.Session) error {
|
||||
if err := conn.exec(`
|
||||
INSERT INTO sessions (
|
||||
session_id, project_id, start_ts,
|
||||
user_uuid, user_device, user_device_type, user_country,
|
||||
user_os, user_os_version,
|
||||
rev_id,
|
||||
tracker_version, issue_score,
|
||||
platform,
|
||||
user_agent, user_browser, user_browser_version, user_device_memory_size, user_device_heap_size
|
||||
) VALUES (
|
||||
$1, $2, $3,
|
||||
$4, $5, $6, $7,
|
||||
$8, NULLIF($9, ''),
|
||||
NULLIF($10, ''),
|
||||
$11, $12,
|
||||
$13,
|
||||
NULLIF($14, ''), NULLIF($15, ''), NULLIF($16, ''), NULLIF($17, 0), NULLIF($18, 0::bigint)
|
||||
)`,
|
||||
sessionID, s.ProjectID, s.Timestamp,
|
||||
s.UserUUID, s.UserDevice, s.UserDeviceType, s.UserCountry,
|
||||
s.UserOS, s.UserOSVersion,
|
||||
s.RevID,
|
||||
s.TrackerVersion, s.Timestamp/1000,
|
||||
s.Platform,
|
||||
s.UserAgent, s.UserBrowser, s.UserBrowserVersion, s.UserDeviceMemorySize, s.UserDeviceHeapSize,
|
||||
); err != nil {
|
||||
return err;
|
||||
}
|
||||
conn.insertAutocompleteValue(sessionID, getAutocompleteType("USEROS", s.Platform), s.UserOS)
|
||||
conn.insertAutocompleteValue(sessionID, getAutocompleteType("USERDEVICE", s.Platform), s.UserDevice)
|
||||
conn.insertAutocompleteValue(sessionID, getAutocompleteType("USERCOUNTRY", s.Platform), s.UserCountry)
|
||||
conn.insertAutocompleteValue(sessionID, getAutocompleteType("REVID", s.Platform), s.RevID)
|
||||
// s.Platform == "web"
|
||||
conn.insertAutocompleteValue(sessionID, "USERBROWSER", s.UserBrowser)
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertSessionEnd finalizes a session: it denormalizes the session's
// distinct issue types onto the sessions row (search acceleration), then
// computes duration from the given end timestamp and returns it.
func (conn *Conn) InsertSessionEnd(sessionID uint64, timestamp uint64) (uint64, error) {
	// Search acceleration
	if err := conn.exec(`
		UPDATE sessions
		SET issue_types=(SELECT COALESCE(ARRAY_AGG(DISTINCT ps.type), '{}')::issue_type[]
			FROM events_common.issues
				INNER JOIN issues AS ps USING (issue_id)
			WHERE session_id = $1)
		WHERE session_id = $1
	`,
		sessionID,
	); err != nil {
		// Best-effort: a failed denormalization must not block session end.
		log.Printf("Error while updating issue_types %v", sessionID)
	}

	var dur uint64
	if err := conn.queryRow(`
		UPDATE sessions SET duration=$2 - start_ts
		WHERE session_id=$1
		RETURNING duration
	`,
		sessionID, timestamp,
	).Scan(&dur); err != nil {
		return 0, err
	}
	return dur, nil
}
|
||||
|
||||
// InsertRequest appends a request event (url, duration, success flag) to
// the session's common event stream.
func (conn *Conn) InsertRequest(sessionID uint64, timestamp uint64, index uint64, url string, duration uint64, success bool) error {
	return conn.exec(`
		INSERT INTO events_common.requests (
			session_id, timestamp, seq_index, url, duration, success
		) VALUES (
			$1, $2, $3, $4, $5, $6
		)`,
		sessionID, timestamp,
		getSqIdx(index), // message id folded into the int32 seq_index range
		url, duration, success,
	)
}

// InsertCustomEvent appends a named custom event with an arbitrary payload
// to the session's common event stream.
func (conn *Conn) InsertCustomEvent(sessionID uint64, timestamp uint64, index uint64, name string, payload string) error {
	return conn.exec(`
		INSERT INTO events_common.customs (
			session_id, timestamp, seq_index, name, payload
		) VALUES (
			$1, $2, $3, $4, $5
		)`,
		sessionID, timestamp,
		getSqIdx(index), // message id folded into the int32 seq_index range
		name, payload,
	)
}
|
||||
|
||||
// InsertUserID sets the identified user id on the session row.
func (conn *Conn) InsertUserID(sessionID uint64, userID string) error {
	return conn.exec(`
		UPDATE sessions SET user_id = $1
		WHERE session_id = $2`,
		userID, sessionID,
	)
}

// InsertUserAnonymousID sets the anonymous user id on the session row.
func (conn *Conn) InsertUserAnonymousID(sessionID uint64, userAnonymousID string) error {
	return conn.exec(`
		UPDATE sessions SET user_anonymous_id = $1
		WHERE session_id = $2`,
		userAnonymousID, sessionID,
	)
}

// InsertMetadata writes value into the session's metadata_<keyNo> column.
// keyNo is an unsigned slot index, so interpolating it into the column
// name cannot inject SQL.
func (conn *Conn) InsertMetadata(sessionID uint64, keyNo uint, value string) error {
	return conn.exec(fmt.Sprintf(`
		UPDATE sessions SET metadata_%v = $1
		WHERE session_id = $2`, keyNo),
		value, sessionID,
	)
	// conn.insertAutocompleteValue(sessionID, "METADATA", value)
}
|
||||
|
||||
func (conn *Conn) InsertIssueEvent(sessionID uint64, projectID uint32, e *messages.IssueEvent) error {
|
||||
tx, err := conn.begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.rollback()
|
||||
issueID := hashid.IssueID(projectID, e)
|
||||
|
||||
// TEMP. TODO: nullable & json message field type
|
||||
payload := &e.Payload;
|
||||
if *payload == "" || *payload == "{}" {
|
||||
payload = nil
|
||||
}
|
||||
context := &e.Context;
|
||||
if *context == "" || *context == "{}" {
|
||||
context = nil
|
||||
}
|
||||
|
||||
if err = tx.exec(`
|
||||
INSERT INTO issues (
|
||||
project_id, issue_id, type, context_string, context
|
||||
) (SELECT
|
||||
project_id, $2, $3, $4, CAST($5 AS jsonb)
|
||||
FROM sessions
|
||||
WHERE session_id = $1
|
||||
)ON CONFLICT DO NOTHING`,
|
||||
sessionID, issueID, e.Type, e.ContextString, context,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.exec(`
|
||||
INSERT INTO events_common.issues (
|
||||
session_id, issue_id, timestamp, seq_index, payload
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, CAST($5 AS jsonb)
|
||||
)`,
|
||||
sessionID, issueID, e.Timestamp,
|
||||
getSqIdx(e.MessageID),
|
||||
payload,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.exec(`
|
||||
UPDATE sessions SET issue_score = issue_score + $2
|
||||
WHERE session_id = $1`,
|
||||
sessionID, getIssueScore(e),
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: no redundancy. Deliver to UI in a different way
|
||||
if e.Type == "custom" {
|
||||
if err = tx.exec(`
|
||||
INSERT INTO events_common.customs
|
||||
(session_id, seq_index, timestamp, name, payload, level)
|
||||
VALUES
|
||||
($1, $2, $3, $4, $5, 'error')
|
||||
`,
|
||||
sessionID, getSqIdx(e.MessageID), e.Timestamp, e.ContextString, e.Payload,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return tx.commit()
|
||||
}
|
||||
|
||||
|
||||
181
backend/pkg/db/postgres/messages_ios.go
Normal file
181
backend/pkg/db/postgres/messages_ios.go
Normal file
|
|
@ -0,0 +1,181 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/hashid"
|
||||
"openreplay/backend/pkg/url"
|
||||
)
|
||||
|
||||
func (conn *Conn) InsertIOSCustomEvent(sessionID uint64, e *messages.IOSCustomEvent) error {
|
||||
err := conn.InsertCustomEvent(sessionID, e.Timestamp, e.Index, e.Name, e.Payload)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "CUSTOM_IOS", e.Name)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSUserID(sessionID uint64, userID *messages.IOSUserID) error {
|
||||
err := conn.InsertUserID(sessionID, userID.Value)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "USERID_IOS", userID.Value)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSUserAnonymousID(sessionID uint64, userAnonymousID *messages.IOSUserAnonymousID) error {
|
||||
err := conn.InsertUserAnonymousID(sessionID, userAnonymousID.Value)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "USERANONYMOUSID_IOS", userAnonymousID.Value)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertIOSNetworkCall(sessionID uint64, e *messages.IOSNetworkCall) error {
|
||||
err := conn.InsertRequest(sessionID, e.Timestamp, e.Index, e.URL, e.Duration, e.Success)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "REQUEST_IOS", url.DiscardURLQuery(e.URL))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// InsertIOSScreenEnter records a view entry for an iOS session: the view
// event plus a pages_count bump, both in one transaction; the view name is
// fed to autocomplete only after a successful commit.
func (conn *Conn) InsertIOSScreenEnter(sessionID uint64, screenEnter *messages.IOSScreenEnter) error {
	tx, err := conn.begin()
	if err != nil {
		return err
	}
	// Rolls back automatically unless commit below succeeds first.
	defer tx.rollback()

	if err = tx.exec(`
		INSERT INTO events_ios.views (
			session_id, timestamp, seq_index, name
		) VALUES (
			$1, $2, $3, $4
		)`,
		sessionID, screenEnter.Timestamp, screenEnter.Index, screenEnter.ViewName,
	); err != nil {
		return err
	}
	if err = tx.exec(`
		UPDATE sessions SET pages_count = pages_count + 1
		WHERE session_id = $1`,
		sessionID,
	); err != nil {
		return err
	}
	if err = tx.commit(); err != nil {
		return err
	}
	conn.insertAutocompleteValue(sessionID, "VIEW_IOS", screenEnter.ViewName)
	return nil
}

// InsertIOSClickEvent records a click for an iOS session: the click event
// plus an events_count bump, both in one transaction; the label is fed to
// autocomplete only after a successful commit.
func (conn *Conn) InsertIOSClickEvent(sessionID uint64, clickEvent *messages.IOSClickEvent) error {
	tx, err := conn.begin()
	if err != nil {
		return err
	}
	defer tx.rollback()

	if err = tx.exec(`
		INSERT INTO events_ios.clicks (
			session_id, timestamp, seq_index, label
		) VALUES (
			$1, $2, $3, $4
		)`,
		sessionID, clickEvent.Timestamp, clickEvent.Index, clickEvent.Label,
	); err != nil {
		return err
	}
	if err = tx.exec(`
		UPDATE sessions SET events_count = events_count + 1
		WHERE session_id = $1`,
		sessionID,
	); err != nil {
		return err
	}
	if err = tx.commit(); err != nil {
		return err
	}
	conn.insertAutocompleteValue(sessionID, "CLICK_IOS", clickEvent.Label)
	return nil
}

// InsertIOSInputEvent records an input for an iOS session. Masked input
// values are stored as NULL; the input event plus an events_count bump run
// in one transaction, and the label is fed to autocomplete after commit.
func (conn *Conn) InsertIOSInputEvent(sessionID uint64, inputEvent *messages.IOSInputEvent) error {
	tx, err := conn.begin()
	if err != nil {
		return err
	}
	defer tx.rollback()

	// Masked values must never reach the database.
	var value interface{} = inputEvent.Value
	if inputEvent.ValueMasked {
		value = nil
	}

	if err = tx.exec(`
		INSERT INTO events_ios.inputs (
			session_id, timestamp, seq_index, label, value
		) VALUES (
			$1, $2, $3, $4, $5
		)`,
		sessionID, inputEvent.Timestamp, inputEvent.Index, inputEvent.Label, value,
	); err != nil {
		return err
	}
	if err = tx.exec(`
		UPDATE sessions SET events_count = events_count + 1
		WHERE session_id = $1`,
		sessionID,
	); err != nil {
		return err
	}
	if err = tx.commit(); err != nil {
		return err
	}
	conn.insertAutocompleteValue(sessionID, "INPUT_IOS", inputEvent.Label)
	// conn.insertAutocompleteValue(sessionID, "INPUT_VALUE", inputEvent.Label)
	return nil
}

// InsertIOSCrash records a crash for an iOS session in one transaction:
// it upserts the crash definition (deduplicated by hashid), appends the
// per-session crash event, and bumps errors_count and issue_score.
func (conn *Conn) InsertIOSCrash(sessionID uint64, projectID uint32, crash *messages.IOSCrash) error {
	tx, err := conn.begin()
	if err != nil {
		return err
	}
	defer tx.rollback()

	crashID := hashid.IOSCrashID(projectID, crash)

	if err = tx.exec(`
		INSERT INTO crashes_ios (
			project_id, crash_id, name, reason, stacktrace
		) (SELECT
			project_id, $2, $3, $4, $5
			FROM sessions
			WHERE session_id = $1
		)ON CONFLICT DO NOTHING`,
		sessionID, crashID, crash.Name, crash.Reason, crash.Stacktrace,
	); err != nil {
		return err
	}
	if err = tx.exec(`
		INSERT INTO events_ios.crashes (
			session_id, timestamp, seq_index, crash_id
		) VALUES (
			$1, $2, $3, $4
		)`,
		sessionID, crash.Timestamp, crash.Index, crashID,
	); err != nil {
		return err
	}
	if err = tx.exec(`
		UPDATE sessions SET errors_count = errors_count + 1, issue_score = issue_score + 1000
		WHERE session_id = $1`,
		sessionID,
	); err != nil {
		return err
	}
	return tx.commit()
}
|
||||
|
||||
|
||||
200
backend/pkg/db/postgres/messages_web.go
Normal file
200
backend/pkg/db/postgres/messages_web.go
Normal file
|
|
@ -0,0 +1,200 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"openreplay/backend/pkg/hashid"
|
||||
"openreplay/backend/pkg/url"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
// TODO: change messages and replace everywhere to e.Index
|
||||
// getSqIdx reduces a 64-bit message ID modulo MaxInt32 so it fits the
// sequence-index column range.
// TODO: change messages and replace everywhere to e.Index
func getSqIdx(messageID uint64) uint {
	const modulus = math.MaxInt32
	return uint(messageID % modulus)
}
|
||||
|
||||
|
||||
func (conn *Conn) InsertWebCustomEvent(sessionID uint64, e *CustomEvent) error {
|
||||
err := conn.InsertCustomEvent(sessionID, e.Timestamp,
|
||||
e.MessageID,
|
||||
e.Name, e.Payload)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "CUSTOM", e.Name)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertWebUserID(sessionID uint64, userID *UserID) error {
|
||||
err := conn.InsertUserID(sessionID, userID.ID)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "USERID", userID.ID)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertWebUserAnonymousID(sessionID uint64, userAnonymousID *UserAnonymousID) error {
|
||||
err := conn.InsertUserAnonymousID(sessionID, userAnonymousID.ID)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "USERANONYMOUSID", userAnonymousID.ID)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertWebResourceEvent(sessionID uint64, e *ResourceEvent) error {
|
||||
if e.Type != "fetch" {
|
||||
return nil
|
||||
}
|
||||
err := conn.InsertRequest(sessionID, e.Timestamp,
|
||||
e.MessageID,
|
||||
e.URL, e.Duration, e.Success,
|
||||
)
|
||||
if err == nil {
|
||||
conn.insertAutocompleteValue(sessionID, "REQUEST", url.DiscardURLQuery(e.URL))
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: fix column "dom_content_loaded_event_end" of relation "pages"
|
||||
func (conn *Conn) InsertWebPageEvent(sessionID uint64, e *PageEvent) error {
|
||||
host, path, err := url.GetURLParts(e.URL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tx, err := conn.begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.rollback()
|
||||
if err := tx.exec(`
|
||||
INSERT INTO events.pages (
|
||||
session_id, message_id, timestamp, referrer, base_referrer, host, path, base_path,
|
||||
dom_content_loaded_time, load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, time_to_interactive,
|
||||
response_time, dom_building_time
|
||||
) VALUES (
|
||||
$1, $2, $3, $4, $5, $6, $7, $8,
|
||||
NULLIF($9, 0), NULLIF($10, 0), NULLIF($11, 0), NULLIF($12, 0), NULLIF($13, 0), NULLIF($14, 0), NULLIF($15, 0), NULLIF($16, 0),
|
||||
NULLIF($17, 0), NULLIF($18, 0)
|
||||
)
|
||||
`,
|
||||
sessionID, e.MessageID, e.Timestamp, e.Referrer, url.DiscardURLQuery(e.Referrer), host, path, url.DiscardURLQuery(path),
|
||||
e.DomContentLoadedEventEnd, e.LoadEventEnd, e.ResponseEnd, e.FirstPaint, e.FirstContentfulPaint, e.SpeedIndex, e.VisuallyComplete, e.TimeToInteractive,
|
||||
calcResponseTime(e), calcDomBuildingTime(e),
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.exec(`
|
||||
UPDATE sessions SET pages_count = pages_count + 1
|
||||
WHERE session_id = $1`,
|
||||
sessionID,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.insertAutocompleteValue(sessionID, url.DiscardURLQuery(path), "LOCATION")
|
||||
conn.insertAutocompleteValue(sessionID, url.DiscardURLQuery(e.Referrer), "REFERRER")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conn *Conn) InsertWebClickEvent(sessionID uint64, e *ClickEvent) error {
|
||||
tx, err := conn.begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.rollback()
|
||||
if err = tx.exec(`
|
||||
INSERT INTO events.clicks
|
||||
(session_id, message_id, timestamp, label)
|
||||
VALUES
|
||||
($1, $2, $3, NULLIF($4, ''))
|
||||
`,
|
||||
sessionID, e.MessageID, e.Timestamp, e.Label,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.exec(`
|
||||
UPDATE sessions SET events_count = events_count + 1
|
||||
WHERE session_id = $1`,
|
||||
sessionID,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.insertAutocompleteValue(sessionID, e.Label, "CLICK")
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
func (conn *Conn) InsertWebInputEvent(sessionID uint64, e *InputEvent) error {
|
||||
tx, err := conn.begin()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.rollback()
|
||||
value := &e.Value
|
||||
if e.ValueMasked {
|
||||
value = nil
|
||||
}
|
||||
if err = tx.exec(`
|
||||
INSERT INTO events.inputs
|
||||
(session_id, message_id, timestamp, value, label)
|
||||
VALUES
|
||||
($1, $2, $3, $4, NULLIF($5,''))
|
||||
`,
|
||||
sessionID, e.MessageID, e.Timestamp, value, e.Label,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.exec(`
|
||||
UPDATE sessions SET events_count = events_count + 1
|
||||
WHERE session_id = $1`,
|
||||
sessionID,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = tx.commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
conn.insertAutocompleteValue(sessionID, e.Label, "INPUT")
|
||||
return nil
|
||||
}
|
||||
|
||||
// InsertWebErrorEvent stores a web error in one transaction: the error
// definition is upserted into errors (deduplicated by a deterministic hash
// ID shared across sessions), an occurrence is appended to events.errors,
// and the session's error counters and issue score are bumped.
func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *ErrorEvent) error {
	tx, err := conn.begin()
	if err != nil {
		return err
	}
	defer tx.rollback() // no-op once the commit below succeeds
	// Same source/name/message/payload always hashes to the same error_id,
	// so ON CONFLICT DO NOTHING collapses duplicates.
	errorID := hashid.WebErrorID(projectID, e)
	if err = tx.exec(`
		INSERT INTO errors
			(error_id, project_id, source, name, message, payload)
		VALUES
			($1, $2, $3, $4, $5, $6)
		ON CONFLICT DO NOTHING`,
		errorID, projectID, e.Source, e.Name, e.Message, e.Payload,
	); err != nil {
		return err
	}
	if err = tx.exec(`
		INSERT INTO events.errors
			(session_id, message_id, timestamp, error_id)
		VALUES
			($1, $2, $3, $4)
		`,
		sessionID, e.MessageID, e.Timestamp, errorID,
	); err != nil {
		return err
	}
	// An error weighs heavily on the session's issue score (+1000).
	if err = tx.exec(`
		UPDATE sessions SET errors_count = errors_count + 1, issue_score = issue_score + 1000
		WHERE session_id = $1`,
		sessionID,
	); err != nil {
		return err
	}
	return tx.commit()
}
|
||||
68
backend/pkg/db/postgres/messages_web_stats.go
Normal file
68
backend/pkg/db/postgres/messages_web_stats.go
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/url"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
|
||||
// InsertWebStatsLongtask is a stub: long-task messages are currently
// accepted and dropped without persisting anything.
func (conn *Conn) InsertWebStatsLongtask(sessionID uint64, l *LongTask) error {
	return nil // Do we even use them?
	// conn.exec(``);
}
|
||||
|
||||
|
||||
func (conn *Conn) InsertWebStatsPerformance(sessionID uint64, p *PerformanceTrackAggr) error {
|
||||
timestamp := (p.TimestampEnd + p.TimestampStart) /2
|
||||
return conn.exec(`
|
||||
INSERT INTO events.performance (
|
||||
session_id, timestamp, message_id,
|
||||
min_fps, avg_fps, max_fps,
|
||||
min_cpu, avg_cpu, max_cpu,
|
||||
min_total_js_heap_size, avg_total_js_heap_size, max_total_js_heap_size,
|
||||
min_used_js_heap_size, avg_used_js_heap_size, max_used_js_heap_size
|
||||
) VALUES (
|
||||
$1, $2, $3,
|
||||
$4, $5, $6,
|
||||
$7, $8, $9,
|
||||
$10, $11, $12,
|
||||
$13, $14, $15
|
||||
)`,
|
||||
sessionID, timestamp, timestamp, // ??? TODO: primary key by timestamp+session_id
|
||||
p.MinFPS, p.AvgFPS, p.MaxFPS,
|
||||
p.MinCPU, p.AvgCPU, p.MinCPU,
|
||||
p.MinTotalJSHeapSize, p.AvgTotalJSHeapSize, p.MaxTotalJSHeapSize,
|
||||
p.MinUsedJSHeapSize, p.AvgUsedJSHeapSize, p.MaxUsedJSHeapSize,
|
||||
);
|
||||
}
|
||||
|
||||
// InsertWebStatsResourceEvent stores a resource-load sample for analytics.
// Empty methods and zero timings/sizes are stored as NULL via NULLIF; the
// URL is indexed in three forms (full, host only, host+path without query).
func (conn *Conn) InsertWebStatsResourceEvent(sessionID uint64, e *ResourceEvent) error {
	host, _, err := url.GetURLParts(e.URL)
	if err != nil {
		return err
	}
	return conn.exec(`
		INSERT INTO events.resources (
			session_id, timestamp, message_id,
			type,
			url, url_host, url_hostpath,
			success, status,
			method,
			duration, ttfb, header_size, encoded_body_size, decoded_body_size
		) VALUES (
			$1, $2, $3,
			$4,
			$5, $6, $7,
			$8, $9,
			NULLIF($10, '')::events.resource_method,
			NULLIF($11, 0), NULLIF($12, 0), NULLIF($13, 0), NULLIF($14, 0), NULLIF($15, 0)
		)`,
		sessionID, e.Timestamp, e.MessageID,
		e.Type,
		e.URL, host, url.DiscardURLQuery(e.URL),
		e.Success, e.Status,
		url.EnsureMethod(e.Method), // normalizes to a valid resource_method or ""
		e.Duration, e.TTFB, e.HeaderSize, e.EncodedBodySize, e.DecodedBodySize,
	)
}
|
||||
60
backend/pkg/db/postgres/notification.go
Normal file
60
backend/pkg/db/postgres/notification.go
Normal file
|
|
@ -0,0 +1,60 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
//go:generate $GOPATH/bin/easytags $GOFILE json
|
||||
|
||||
// TenantNotification is a single in-app notification addressed to one
// tenant, serialized as JSON when sent to the notifications endpoint.
type TenantNotification struct {
	TenantId    uint32                 `db:"tenant_id" json:"tenantId"`
	Title       string                 `db:"title" json:"title"`
	Description string                 `db:"description" json:"description"`
	ButtonText  string                 `db:"button_text" json:"buttonText"`
	ButtonUrl   string                 `db:"button_url" json:"buttonUrl"`
	ImageUrl    *string                `db:"image_url" json:"imageUrl"` // nil when the notification has no image
	Options     map[string]interface{} `db:"options" json:"options"`
}
|
||||
|
||||
// Notifications is the request payload for the notifications endpoint:
// a batch of tenant notifications plus the auth token (filled in by Send).
type Notifications struct {
	Notifications []*TenantNotification `json:"notifications"`
	Token         string                `json:"token"`
}
|
||||
|
||||
func (n *Notifications) Send(url string) {
|
||||
n.Token = "nF46JdQqAM5v9KI9lPMpcu8o9xiJGvNNWOGL7TJP"
|
||||
body, err := json.Marshal(n)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return
|
||||
}
|
||||
//log.Println("------------ Sending a new notification")
|
||||
req, err := http.NewRequest("POST", url, bytes.NewBuffer(body))
|
||||
if err != nil {
|
||||
log.Printf("error in POST notifications: %v\n", err)
|
||||
return
|
||||
}
|
||||
//req.Header.Set("X-Custom-Header", "myvalue")
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
//log.Println("response Status:", resp.Status)
|
||||
//log.Println("response Headers:", resp.Header)
|
||||
//respBody, _ := ioutil.ReadAll(resp.Body)
|
||||
//log.Println("response Body:", string(respBody))
|
||||
}
|
||||
|
||||
func (n TenantNotification) Send(url string) {
|
||||
body := Notifications{
|
||||
Notifications: []*TenantNotification{&n},
|
||||
}
|
||||
body.Send(url)
|
||||
}
|
||||
45
backend/pkg/db/postgres/project.go
Normal file
45
backend/pkg/db/postgres/project.go
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"github.com/jackc/pgx/v4"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
// GetProjectByKey looks up an active project by its public project key and
// returns its session limits. NOTE: when no active project matches, it
// returns (nil, nil) — callers must check the pointer, not only the error.
func (conn *Conn) GetProjectByKey(projectKey string) (*Project, error) {
	p := &Project{ ProjectKey: projectKey }
	if err := conn.queryRow(`
		SELECT max_session_duration, sample_rate, project_id
		FROM projects
		WHERE project_key=$1 AND active = true
	`,
		projectKey,
	).Scan(&p.MaxSessionDuration, &p.SampleRate, &p.ProjectID); err != nil {
		if err == pgx.ErrNoRows {
			err = nil // not-found is not an error: signalled by the nil Project
		}
		return nil, err
	}
	return p, nil
}
|
||||
|
||||
// TODO: logical separation of metadata
|
||||
// TODO: logical separation of metadata
// GetProject loads an active project by numeric ID, including its ten
// metadata key slots. NOTE: like GetProjectByKey, a missing project yields
// (nil, nil) — callers must check the pointer, not only the error.
func (conn *Conn) GetProject(projectID uint32) (*Project, error) {
	p := &Project{ ProjectID: projectID }
	if err := conn.queryRow(`
		SELECT project_key, max_session_duration,
			metadata_1, metadata_2, metadata_3, metadata_4, metadata_5,
			metadata_6, metadata_7, metadata_8, metadata_9, metadata_10
		FROM projects
		WHERE project_id=$1 AND active = true
	`,
		projectID,
	).Scan(&p.ProjectKey,&p.MaxSessionDuration,
		&p.Metadata1, &p.Metadata2, &p.Metadata3, &p.Metadata4, &p.Metadata5,
		&p.Metadata6, &p.Metadata7, &p.Metadata8, &p.Metadata9, &p.Metadata10); err != nil {
		if err == pgx.ErrNoRows {
			err = nil // not-found is not an error: signalled by the nil Project
		}
		return nil, err
	}
	return p, nil
}
|
||||
106
backend/pkg/db/postgres/session.go
Normal file
106
backend/pkg/db/postgres/session.go
Normal file
|
|
@ -0,0 +1,106 @@
|
|||
package postgres
|
||||
|
||||
//import . "openreplay/backend/pkg/messages"
|
||||
import . "openreplay/backend/pkg/db/types"
|
||||
//import "log"
|
||||
|
||||
// GetSession loads a full session row by ID. Nullable columns rev_id and
// user_os_version are scanned through temporary pointers and flattened to
// "" when NULL; unlike the project getters, a missing session DOES return
// a non-nil error (pgx.ErrNoRows is not swallowed here).
func (conn *Conn) GetSession(sessionID uint64) (*Session, error) {
	s := &Session{ SessionID: sessionID }
	var revID, userOSVersion *string
	if err := conn.queryRow(`
		SELECT platform,
			duration, project_id, start_ts,
			user_uuid, user_os, user_os_version,
			user_device, user_device_type, user_country,
			rev_id, tracker_version,
			user_id, user_anonymous_id,
			metadata_1, metadata_2, metadata_3, metadata_4, metadata_5,
			metadata_6, metadata_7, metadata_8, metadata_9, metadata_10
		FROM sessions
		WHERE session_id=$1
	`,
		sessionID,
	).Scan(&s.Platform,
		&s.Duration, &s.ProjectID, &s.Timestamp,
		&s.UserUUID, &s.UserOS, &userOSVersion,
		&s.UserDevice, &s.UserDeviceType, &s.UserCountry,
		&revID, &s.TrackerVersion,
		&s.UserID, &s.UserAnonymousID,
		&s.Metadata1, &s.Metadata2, &s.Metadata3, &s.Metadata4, &s.Metadata5,
		&s.Metadata6, &s.Metadata7, &s.Metadata8, &s.Metadata9, &s.Metadata10); err != nil {
		return nil, err
	}
	// Flatten nullable columns: NULL becomes the empty string.
	if userOSVersion != nil { // TODO: choose format, make f
		s.UserOSVersion = *userOSVersion
	}
	if revID != nil {
		s.RevID = *revID
	}
	return s, nil
}
|
||||
|
||||
// func (conn *Conn) GetSessionClickEvents(sessionID uint64) (list []IOSClickEvent, err error) {
|
||||
// rows, err := conn.query(`
|
||||
// SELECT
|
||||
// timestamp, seq_index, label
|
||||
// FROM events_ios.clicks
|
||||
// WHERE session_id=$1
|
||||
// `, sessionID)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// defer rows.Close()
|
||||
// for rows.Next() {
|
||||
// e := new(IOSClickEvent)
|
||||
// if err = rows.Scan(&e.Timestamp, &e.Index, &e.Label); err != nil {
|
||||
// log.Printf("Error while scanning click events: %v", err)
|
||||
// } else {
|
||||
// list = append(list, e)
|
||||
// }
|
||||
// }
|
||||
// return list
|
||||
// }
|
||||
|
||||
// func (conn *Conn) GetSessionInputEvents(sessionID uint64) (list []IOSInputEvent, err error) {
|
||||
// rows, err := conn.query(`
|
||||
// SELECT
|
||||
// timestamp, seq_index, label, value
|
||||
// FROM events_ios.inputs
|
||||
// WHERE session_id=$1
|
||||
// `, sessionID)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// defer rows.Close()
|
||||
// for rows.Next() {
|
||||
// e := new(IOSInputEvent)
|
||||
// if err = rows.Scan(&e.Timestamp, &e.Index, &e.Label, &e.Value); err != nil {
|
||||
// log.Printf("Error while scanning click events: %v", err)
|
||||
// } else {
|
||||
// list = append(list, e)
|
||||
// }
|
||||
// }
|
||||
// return list
|
||||
// }
|
||||
|
||||
// func (conn *Conn) GetSessionCrashEvents(sessionID uint64) (list []IOSCrash, err error) {
|
||||
// rows, err := conn.query(`
|
||||
// SELECT
|
||||
// timestamp, seq_index
|
||||
// FROM events_ios.crashes
|
||||
// WHERE session_id=$1
|
||||
// `, sessionID)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// defer rows.Close()
|
||||
// for rows.Next() {
|
||||
// e := new(IOSCrash)
|
||||
// if err = rows.Scan(&e.Timestamp, &e.Index, &e.Label, &e.Value); err != nil {
|
||||
// log.Printf("Error while scanning click events: %v", err)
|
||||
// } else {
|
||||
// list = append(list, e)
|
||||
// }
|
||||
// }
|
||||
// return list
|
||||
// }
|
||||
45
backend/pkg/db/postgres/unstarted_session.go
Normal file
45
backend/pkg/db/postgres/unstarted_session.go
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
package postgres
|
||||
|
||||
// UnstartedSession describes a visitor whose session was rejected before it
// started (e.g. sampling or do-not-track); kept for funnel/diagnostics.
type UnstartedSession struct {
	ProjectKey         string // public project key; resolved to project_id on insert
	TrackerVersion     string
	DoNotTrack         bool
	Platform           string
	UserAgent          string
	UserOS             string
	UserOSVersion      string
	UserBrowser        string
	UserBrowserVersion string
	UserDevice         string
	UserDeviceType     string
	UserCountry        string
}
|
||||
|
||||
// InsertUnstartedSession records a rejected session attempt. The project
// key is resolved to project_id via a subquery; if the key does not match
// any project the subquery yields NULL and the insert fails on the
// project_id constraint.
func (conn *Conn) InsertUnstartedSession(s UnstartedSession) error {
	return conn.exec(`
		INSERT INTO unstarted_sessions (
			project_id,
			tracker_version, do_not_track,
			platform, user_agent,
			user_os, user_os_version,
			user_browser, user_browser_version,
			user_device, user_device_type,
			user_country
		) VALUES (
			(SELECT project_id FROM projects WHERE project_key = $1),
			$2, $3,
			$4, $5,
			$6, $7,
			$8, $9,
			$10, $11,
			$12
		)`,
		s.ProjectKey,
		s.TrackerVersion, s.DoNotTrack,
		s.Platform, s.UserAgent,
		s.UserOS, s.UserOSVersion,
		s.UserBrowser, s.UserBrowserVersion,
		s.UserDevice, s.UserDeviceType,
		s.UserCountry,
	)
}
|
||||
53
backend/pkg/db/types/project.go
Normal file
53
backend/pkg/db/types/project.go
Normal file
|
|
@ -0,0 +1,53 @@
|
|||
package types
|
||||
|
||||
// Project mirrors one row of the projects table: identity, session limits
// and the ten configurable metadata key slots (nil = slot unused).
type Project struct {
	ProjectID uint32
	ProjectKey string
	MaxSessionDuration int64
	SampleRate byte
	Metadata1 *string
	Metadata2 *string
	Metadata3 *string
	Metadata4 *string
	Metadata5 *string
	Metadata6 *string
	Metadata7 *string
	Metadata8 *string
	Metadata9 *string
	Metadata10 *string
}


// GetMetadataNo returns the 1-based slot number whose configured key equals
// the given key, or 0 when no slot matches.
func (p *Project) GetMetadataNo(key string) uint {
	slots := []*string{
		p.Metadata1, p.Metadata2, p.Metadata3, p.Metadata4, p.Metadata5,
		p.Metadata6, p.Metadata7, p.Metadata8, p.Metadata9, p.Metadata10,
	}
	for i, slot := range slots {
		if slot != nil && *slot == key {
			return uint(i + 1)
		}
	}
	return 0
}
|
||||
65
backend/pkg/db/types/session.go
Normal file
65
backend/pkg/db/types/session.go
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
package types
|
||||
|
||||
// Session mirrors one row of the sessions table, shared by the web and iOS
// pipelines; the trailing group of fields is populated for web only.
type Session struct {
	SessionID uint64
	Timestamp uint64
	ProjectID uint32
	TrackerVersion string
	RevID string
	UserUUID string
	UserOS string
	UserOSVersion string
	UserDevice string
	UserCountry string

	Duration *uint64
	PagesCount int
	EventsCount int
	ErrorsCount int
	UserID *string
	UserAnonymousID *string
	Metadata1 *string
	Metadata2 *string
	Metadata3 *string
	Metadata4 *string
	Metadata5 *string
	Metadata6 *string
	Metadata7 *string
	Metadata8 *string
	Metadata9 *string
	Metadata10 *string

	Platform string
	// Only-web properties
	UserAgent string
	UserBrowser string
	UserBrowserVersion string
	UserDeviceType string
	UserDeviceMemorySize uint64
	UserDeviceHeapSize uint64
}

// SetMetadata stores value into the metadata slot keyNo (1..10).
// Out-of-range slot numbers are silently ignored.
func (s *Session) SetMetadata(keyNo uint, value string) {
	slots := []**string{
		&s.Metadata1, &s.Metadata2, &s.Metadata3, &s.Metadata4, &s.Metadata5,
		&s.Metadata6, &s.Metadata7, &s.Metadata8, &s.Metadata9, &s.Metadata10,
	}
	if keyNo >= 1 && keyNo <= uint(len(slots)) {
		*slots[keyNo-1] = &value
	}
}
|
||||
37
backend/pkg/dev/profiling/profiling.go
Normal file
37
backend/pkg/dev/profiling/profiling.go
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
package profiling
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/http"
|
||||
"github.com/gorilla/mux"
|
||||
_ "net/http/pprof"
|
||||
)
|
||||
|
||||
// Profile starts a pprof HTTP server on :6060 in a background goroutine.
// Development helper only: the goroutine panics (crashing the process) if
// the listener cannot be started, and there is no way to stop it.
func Profile() {
	go func() {
		router := mux.NewRouter()
		// net/http/pprof registers its handlers on http.DefaultServeMux;
		// forward the /debug/pprof/ prefix to it.
		router.PathPrefix("/debug/pprof/").Handler(http.DefaultServeMux)
		log.Println("Starting profiler...")
		if err := http.ListenAndServe(":6060", router); err != nil {
			panic(err)
		}
	}()
}
|
||||
|
||||
|
||||
/*
|
||||
|
||||
docker run -p 6060:6060 -e REQUIRED_ENV=http://value -e ANOTHER_ENV=anothervalue workername
|
||||
|
||||
THEN
|
||||
go tool pprof http://localhost:6060/debug/pprof/heap
|
||||
OR
|
||||
go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30
|
||||
|
||||
(Look up https://golang.org/pkg/net/http/pprof/)
|
||||
|
||||
|
||||
THEN
|
||||
https://www.speedscope.app/
|
||||
|
||||
*/
|
||||
29
backend/pkg/env/aws.go
vendored
Normal file
29
backend/pkg/env/aws.go
vendored
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
package env
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/aws/aws-sdk-go/aws"
|
||||
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||
_session "github.com/aws/aws-sdk-go/aws/session"
|
||||
)
|
||||
|
||||
func AWSSessionOnRegion(region string) *_session.Session {
|
||||
AWS_ACCESS_KEY_ID := String("AWS_ACCESS_KEY_ID")
|
||||
AWS_SECRET_ACCESS_KEY := String("AWS_SECRET_ACCESS_KEY")
|
||||
config := &aws.Config{
|
||||
Region: aws.String(region),
|
||||
Credentials: credentials.NewStaticCredentials(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, ""),
|
||||
}
|
||||
AWS_ENDPOINT := StringOptional("AWS_ENDPOINT")
|
||||
if AWS_ENDPOINT != "" {
|
||||
config.Endpoint = aws.String(AWS_ENDPOINT)
|
||||
config.DisableSSL = aws.Bool(true)
|
||||
config.S3ForcePathStyle = aws.Bool(true)
|
||||
}
|
||||
aws_session, err := _session.NewSession(config)
|
||||
if err != nil {
|
||||
log.Fatalf("AWS session error: %v\n", err)
|
||||
}
|
||||
return aws_session
|
||||
}
|
||||
41
backend/pkg/env/fargate.go
vendored
Normal file
41
backend/pkg/env/fargate.go
vendored
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
package env
|
||||
|
||||
import (
	"encoding/json"
	"errors"
	"net"
	"net/http"
	"os"
	"time"
)
|
||||
|
||||
// fargateTaskContainer is the subset of the ECS container metadata document
// this package reads.
type fargateTaskContainer struct {
	Networks []struct {
		IPv4Addresses []string
	}
}

// fargateTaskIP resolves this task's private IPv4 address from the ECS
// container metadata endpoint (ECS_CONTAINER_METADATA_URI). It requires the
// container to have exactly one network with exactly one IPv4 address.
func fargateTaskIP() (net.IP, error) {
	// BUGFIX: http.Get uses the default client, which has no timeout; a hung
	// metadata endpoint would block startup forever. Use a bounded client.
	client := &http.Client{Timeout: 10 * time.Second}
	res, err := client.Get(os.Getenv("ECS_CONTAINER_METADATA_URI"))
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()

	container := fargateTaskContainer{}
	if err := json.NewDecoder(res.Body).Decode(&container); err != nil {
		return nil, err
	}
	if len(container.Networks) != 1 {
		return nil, errors.New("container should have exactly one network")
	}
	network := container.Networks[0]
	if len(network.IPv4Addresses) != 1 {
		return nil, errors.New("container should have exactly one IPv4")
	}

	// To4 returns nil for anything that is not a valid IPv4 address.
	ip := net.ParseIP(network.IPv4Addresses[0]).To4()
	if ip == nil {
		return nil, errors.New("invalid ip address")
	}
	return ip, nil
}
|
||||
48
backend/pkg/env/vars.go
vendored
Normal file
48
backend/pkg/env/vars.go
vendored
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
package env
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// String reads a required environment variable; the process exits with a
// fatal log when the variable is unset or empty.
func String(key string) string {
	value := os.Getenv(key)
	if value == "" {
		log.Fatalln(key + " is missing")
	}
	return value
}
|
||||
|
||||
// StringOptional reads an environment variable, returning "" when unset.
func StringOptional(key string) string {
	return os.Getenv(key)
}
|
||||
|
||||
func Uint16(key string) uint16 {
|
||||
v := String(key)
|
||||
n, _ := strconv.ParseUint(v, 10, 16)
|
||||
if n == 0 {
|
||||
log.Fatalln(key + " has a wrong value")
|
||||
}
|
||||
return uint16(n)
|
||||
}
|
||||
|
||||
func Uint64(key string) uint64 {
|
||||
v := String(key)
|
||||
n, _ := strconv.ParseUint(v, 10, 64)
|
||||
if n == 0 {
|
||||
log.Fatalln(key + " has a wrong value")
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func Bool(key string) bool {
|
||||
v := String(key)
|
||||
if v != "true" && v != "false" {
|
||||
log.Fatalln(key + " has wrong value. Accepted only true or false")
|
||||
}
|
||||
if v == "true" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
22
backend/pkg/env/worker_id.go
vendored
Normal file
22
backend/pkg/env/worker_id.go
vendored
Normal file
|
|
@ -0,0 +1,22 @@
|
|||
package env
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
|
||||
// hashHostname folds a hostname into a 16-bit value by summing each
// character weighted by its 1-based position (wrapping on overflow).
func hashHostname(hostname string) uint16 {
	var sum uint16
	for pos, ch := range hostname {
		sum += uint16(pos+1) * uint16(ch)
	}
	return sum
}
|
||||
|
||||
// WorkerID derives a 16-bit worker identifier: the two low octets of the
// Fargate task's IPv4 address when available, otherwise a hash of HOSTNAME
// (fatal if HOSTNAME is also unset).
// NOTE(review): task IPs differing only in their upper two octets collide.
func WorkerID() uint16 {
	ip, err := fargateTaskIP()
	if err != nil {
		log.Printf("Warning: unable to retrieve Fargate Task IP: %v; trying to use HOSTNAME instead", err)
		return hashHostname(String("HOSTNAME"))
	}
	// ip is a 4-byte IPv4 slice (To4); pack the last two octets.
	return uint16(ip[2])<<8 + uint16(ip[3])
}
|
||||
20
backend/pkg/flakeid/flakeid.go
Normal file
20
backend/pkg/flakeid/flakeid.go
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
package flakeid
|
||||
|
||||
// Flake ID bit layout (64 bits, low to high): seq (8) | shard | timestamp.
const (
	SEQ_ID_SIZE = 8
	SHARD_ID_SIZE = 16
	// NOTE(review): TIMESTAMP_SIZE and TIMESTAMP_SHIFT are derived from
	// SHARD_ID_SHIFT (= 8) rather than SHARD_ID_SIZE (= 16), so a 16-bit
	// shardID shifted left by 8 overlaps the low 8 timestamp bits whenever
	// shardID > 255 — confirm whether shard IDs are meant to fit one byte.
	TIMESTAMP_SIZE = 64 - SEQ_ID_SIZE - SHARD_ID_SHIFT
	SEQ_ID_MAX = 1<<SEQ_ID_SIZE - 1
	TIMESTAMP_MAX = 1<<TIMESTAMP_SIZE - 1
	TIMESTAMP_SHIFT = SEQ_ID_SIZE + SHARD_ID_SHIFT
	SHARD_ID_SHIFT = SEQ_ID_SIZE
	EPOCH = 1550000000000 // ms since Unix epoch; base that timestamps are rebased against
)

// compose packs a rebased timestamp, shard ID and sequence ID into one
// 64-bit flake ID.
func compose(timestamp uint64, shardID uint16, seqID byte) uint64 {
	return (timestamp << TIMESTAMP_SHIFT) | (uint64(shardID) << SHARD_ID_SHIFT) | uint64(seqID)
}

// extractTimestamp recovers the rebased timestamp portion of a flake ID.
func extractTimestamp(id uint64) uint64 {
	return (id >> TIMESTAMP_SHIFT)
}
|
||||
47
backend/pkg/flakeid/flaker.go
Normal file
47
backend/pkg/flakeid/flaker.go
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package flakeid
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Flaker struct {
|
||||
shardID uint16
|
||||
seqID uint8
|
||||
seqMutex *sync.Mutex
|
||||
}
|
||||
|
||||
func NewFlaker(shardID uint16) *Flaker {
|
||||
return &Flaker{
|
||||
shardID: shardID,
|
||||
seqID: 0,
|
||||
seqMutex: &sync.Mutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (flaker *Flaker) nextSeqID() uint8 {
|
||||
flaker.seqMutex.Lock()
|
||||
defer flaker.seqMutex.Unlock()
|
||||
seqID := flaker.seqID
|
||||
if seqID == SEQ_ID_MAX {
|
||||
flaker.seqID = 0
|
||||
} else {
|
||||
flaker.seqID = seqID + 1
|
||||
}
|
||||
return seqID
|
||||
}
|
||||
|
||||
func (flaker *Flaker) Compose(timestamp uint64) (uint64, error) {
|
||||
if timestamp <= EPOCH {
|
||||
return 0, errors.New("epoch is not in the past")
|
||||
}
|
||||
timestamp -= EPOCH
|
||||
if timestamp > TIMESTAMP_MAX {
|
||||
return 0, errors.New("epoch is too small")
|
||||
}
|
||||
return compose(timestamp, flaker.shardID, flaker.nextSeqID()), nil
|
||||
}
|
||||
|
||||
func (flaker *Flaker) ExtractTimestamp(id uint64) uint64 {
|
||||
return extractTimestamp(id) + EPOCH
|
||||
}
|
||||
35
backend/pkg/hashid/hashid.go
Normal file
35
backend/pkg/hashid/hashid.go
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
package hashid
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"hash/fnv"
|
||||
"strconv"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func IssueID(projectID uint32, e *messages.IssueEvent) string {
|
||||
hash := fnv.New128a()
|
||||
hash.Write([]byte(e.Type))
|
||||
hash.Write([]byte(e.ContextString))
|
||||
//hash.Write([]byte(e.Context)) // More detailed that contextString (what about Data Redundancy?)
|
||||
return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
|
||||
}
|
||||
|
||||
|
||||
func IOSCrashID(projectID uint32, crash *messages.IOSCrash) string {
|
||||
hash := fnv.New128a()
|
||||
hash.Write([]byte(crash.Name))
|
||||
hash.Write([]byte(crash.Reason))
|
||||
hash.Write([]byte(crash.Stacktrace))
|
||||
return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
|
||||
}
|
||||
|
||||
func WebErrorID(projectID uint32, errorEvent *messages.ErrorEvent) string {
|
||||
hash := fnv.New128a()
|
||||
hash.Write([]byte(errorEvent.Source))
|
||||
hash.Write([]byte(errorEvent.Name))
|
||||
hash.Write([]byte(errorEvent.Message))
|
||||
hash.Write([]byte(errorEvent.Payload))
|
||||
return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
|
||||
}
|
||||
11
backend/pkg/intervals/intervals.go
Normal file
11
backend/pkg/intervals/intervals.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
package intervals
|
||||
|
||||
const EVENTS_COMMIT_INTERVAL = 1 * 60 * 1000
|
||||
const HEARTBEAT_INTERVAL = 2 * 60 * 1000
|
||||
const INTEGRATIONS_REQUEST_INTERVAL = 2 * 60 * 1000
|
||||
const EVENTS_PAGE_EVENT_TIMEOUT = 2 * 60 * 1000
|
||||
const EVENTS_INPUT_EVENT_TIMEOUT = 2 * 60 * 1000
|
||||
const EVENTS_PERFORMANCE_AGGREGATION_TIMEOUT = 2 * 60 * 1000
|
||||
const EVENTS_SESSION_END_TIMEOUT = HEARTBEAT_INTERVAL + 30 * 1000
|
||||
const EVENTS_SESSION_END_TIMEOUT_WITH_INTEGRATIONS = HEARTBEAT_INTERVAL + 3 * 60 * 1000
|
||||
const EVENTS_BACK_COMMIT_GAP = EVENTS_SESSION_END_TIMEOUT_WITH_INTEGRATIONS + 1*60*1000
|
||||
71
backend/pkg/messages/batch.go
Normal file
71
backend/pkg/messages/batch.go
Normal file
|
|
@ -0,0 +1,71 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"io"
|
||||
"bytes"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func ReadBatch(b []byte, callback func(Message)) error {
|
||||
reader := bytes.NewReader(b)
|
||||
var index uint64
|
||||
var timestamp int64
|
||||
for {
|
||||
msg, err := ReadMessage(reader)
|
||||
if err == io.EOF {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return errors.Wrapf(err, "Batch Message decoding error on message with index %v", index)
|
||||
}
|
||||
isBatchMeta := false
|
||||
switch m := msg.(type){
|
||||
case *BatchMeta: // Is not required to be present in batch since IOS doesn't have it (though we might change it)
|
||||
if index != 0 { // Might be several 0-0 BatchMeta in a row without a error though
|
||||
return errors.New("Batch Meta found at the end of the batch")
|
||||
}
|
||||
index = m.PageNo << 32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
|
||||
timestamp = m.Timestamp
|
||||
isBatchMeta = true
|
||||
// continue readLoop
|
||||
case *Timestamp:
|
||||
timestamp = int64(m.Timestamp) // TODO(?): replace timestamp type to int64 everywhere (including encoding part in tracker)
|
||||
// No skipping here for making it easy to encode back the same sequence of message
|
||||
// continue readLoop
|
||||
}
|
||||
msg.Meta().Index = index
|
||||
msg.Meta().Timestamp = timestamp
|
||||
callback(msg)
|
||||
if !isBatchMeta { // Without that indexes will be unique anyway, though shifted by 1 because BatchMeta is not counted in tracker
|
||||
index++
|
||||
}
|
||||
}
|
||||
return errors.New("Error of the codeflow. (Should return on EOF)")
|
||||
}
|
||||
|
||||
const AVG_MESSAGE_SIZE = 40 // TODO: calculate OR calculate dynamically
|
||||
func WriteBatch(mList []Message) []byte {
|
||||
batch := make([]byte, AVG_MESSAGE_SIZE * len(mList))
|
||||
p := 0
|
||||
for _, msg := range mList {
|
||||
msgBytes := msg.Encode()
|
||||
if len(batch) < p + len(msgBytes) {
|
||||
newBatch := make([]byte, 2*len(batch) + len(msgBytes))
|
||||
copy(newBatch, batch)
|
||||
batch = newBatch
|
||||
}
|
||||
copy(batch[p:], msgBytes)
|
||||
p += len(msgBytes)
|
||||
}
|
||||
return batch[:p]
|
||||
}
|
||||
|
||||
func RewriteBatch(b []byte, rewrite func(Message) Message) ([]byte, error) {
|
||||
mList := make([]Message, 0, len(b)/AVG_MESSAGE_SIZE)
|
||||
if err := ReadBatch(b, func(m Message) {
|
||||
mList = append(mList, rewrite(m))
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return WriteBatch(mList), nil
|
||||
}
|
||||
41
backend/pkg/messages/facade.go
Normal file
41
backend/pkg/messages/facade.go
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
//"io"
|
||||
)
|
||||
|
||||
// Encode serializes a single message into its binary wire format.
// Thin facade over Message.Encode for package consumers.
func Encode(msg Message) []byte {
	return msg.Encode()
}
|
||||
|
||||
//
|
||||
// func EncodeList(msgs []Message) []byte {
|
||||
|
||||
// }
|
||||
//
|
||||
|
||||
// func Decode(b []byte) (Message, error) {
|
||||
// return ReadMessage(bytes.NewReader(b))
|
||||
// }
|
||||
|
||||
// func DecodeEach(b []byte, callback func(Message)) error {
|
||||
// var err error
|
||||
// reader := bytes.NewReader(b)
|
||||
// for {
|
||||
// msg, err := ReadMessage(reader)
|
||||
// if err != nil {
|
||||
// break
|
||||
// }
|
||||
// callback(msg)
|
||||
// }
|
||||
// if err == io.EOF {
|
||||
// return nil
|
||||
// }
|
||||
// return err
|
||||
// }
|
||||
|
||||
func GetMessageTypeID(b []byte) (uint64, error) {
|
||||
reader := bytes.NewReader(b)
|
||||
return ReadUint(reader)
|
||||
}
|
||||
10
backend/pkg/messages/filters.go
Normal file
10
backend/pkg/messages/filters.go
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
package messages
|
||||
|
||||
|
||||
// IsReplayerType reports whether the message type id belongs to the fixed set
// of type ids kept for session replay. The set is identical to the original
// equality chain, expressed as an idiomatic multi-value switch.
func IsReplayerType(id uint64) bool {
	switch id {
	case 0, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 19, 20, 22,
		37, 38, 39, 40, 41, 44, 45, 46, 47, 48, 49, 54, 55, 59,
		90, 93, 100, 102, 103, 105:
		return true
	}
	return false
}
|
||||
|
||||
// IsIOSType reports whether the message type id is an IOS message:
// the contiguous range 90-105 plus the ids 110 and 111 (same set as the
// original equality chain).
func IsIOSType(id uint64) bool {
	return (90 <= id && id <= 105) || id == 110 || id == 111
}
|
||||
1506
backend/pkg/messages/messages.go
Normal file
1506
backend/pkg/messages/messages.go
Normal file
File diff suppressed because it is too large
Load diff
33
backend/pkg/messages/performance/performance.go
Normal file
33
backend/pkg/messages/performance/performance.go
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
package performance
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
|
||||
// TimeDiff returns t1 - t2, clamped at zero when t2 is ahead of t1
// (protects against unsigned underflow).
func TimeDiff(t1 uint64, t2 uint64) uint64 {
	if t1 >= t2 {
		return t1 - t2
	}
	return 0
}
|
||||
|
||||
// FrameRate converts a frame count observed over dt milliseconds into
// frames per second.
func FrameRate(frames int64, dt uint64) float64 {
	return 1000 * float64(frames) / float64(dt)
}
|
||||
|
||||
// TickRate estimates the fraction of expected ticks that actually occurred
// over dt milliseconds, against an ideal rate of 30 ticks per second.
// The result is capped at 1.
func TickRate(ticks int64, dt uint64) float64 {
	rate := float64(ticks) * 30 / float64(dt)
	if rate > 1 {
		return 1
	}
	return rate
}
|
||||
|
||||
// CPURateFromTickRate maps a tick rate in [0, 1] to a CPU load percentage:
// fewer observed ticks implies a busier CPU.
func CPURateFromTickRate(tickRate float64) uint64 {
	percent := uint64(math.Round(tickRate * 100))
	return 100 - percent
}
|
||||
|
||||
func CPURate(ticks int64, dt uint64) uint64 {
|
||||
return CPURateFromTickRate(TickRate(ticks, dt))
|
||||
}
|
||||
155
backend/pkg/messages/primitives.go
Normal file
155
backend/pkg/messages/primitives.go
Normal file
|
|
@ -0,0 +1,155 @@
|
|||
package messages
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"encoding/json"
|
||||
"log"
|
||||
)
|
||||
|
||||
func ReadByte(reader io.Reader) (byte, error) {
|
||||
p := make([]byte, 1)
|
||||
_, err := io.ReadFull(reader, p)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return p[0], nil
|
||||
}
|
||||
|
||||
// func SkipBytes(reader io.ReadSeeker) error {
|
||||
// n, err := ReadUint(reader)
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
// _, err = reader.Seek(n, io.SeekCurrent);
|
||||
// return err
|
||||
// }
|
||||
|
||||
func ReadData(reader io.Reader) ([]byte, error) {
|
||||
n, err := ReadUint(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p := make([]byte, n)
|
||||
_, err = io.ReadFull(reader, p)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func ReadUint(reader io.Reader) (uint64, error) {
|
||||
var x uint64
|
||||
var s uint
|
||||
i := 0
|
||||
for {
|
||||
b, err := ReadByte(reader)
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
if b < 0x80 {
|
||||
if i > 9 || i == 9 && b > 1 {
|
||||
return x, errors.New("overflow")
|
||||
}
|
||||
return x | uint64(b)<<s, nil
|
||||
}
|
||||
x |= uint64(b&0x7f) << s
|
||||
s += 7
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
func ReadInt(reader io.Reader) (int64, error) {
|
||||
ux, err := ReadUint(reader)
|
||||
x := int64(ux >> 1)
|
||||
if err != nil {
|
||||
return x, err
|
||||
}
|
||||
if ux&1 != 0 {
|
||||
x = ^x
|
||||
}
|
||||
return x, err
|
||||
}
|
||||
|
||||
func ReadBoolean(reader io.Reader) (bool, error) {
|
||||
p := make([]byte, 1)
|
||||
_, err := io.ReadFull(reader, p)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return p[0] == 1, nil
|
||||
}
|
||||
|
||||
func ReadString(reader io.Reader) (string, error) {
|
||||
l, err := ReadUint(reader)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if l > 10e6 {
|
||||
return "", errors.New("Too long string")
|
||||
}
|
||||
buf := make([]byte, l)
|
||||
_, err = io.ReadFull(reader, buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
func ReadJson(reader io.Reader) (interface{}, error) {
|
||||
jsonData, err := ReadData(reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var v interface{}
|
||||
if err = json.Unmarshal(jsonData, &v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// WriteUint writes v as a base-128 varint into buf starting at offset p and
// returns the offset just past the written bytes. The caller must ensure buf
// has enough room (up to 10 bytes for a uint64).
func WriteUint(v uint64, buf []byte, p int) int {
	for ; v >= 0x80; v >>= 7 {
		buf[p] = byte(v) | 0x80 // continuation bit set
		p++
	}
	buf[p] = byte(v)
	return p + 1
}
|
||||
|
||||
func WriteInt(v int64, buf []byte, p int) int {
|
||||
uv := uint64(v) << 1
|
||||
if v < 0 {
|
||||
uv = ^uv
|
||||
}
|
||||
return WriteUint(uv, buf, p)
|
||||
}
|
||||
|
||||
// WriteBoolean writes v as a single byte (1 for true, 0 for false) into buf
// at offset p; returns the new offset.
func WriteBoolean(v bool, buf []byte, p int) int {
	var b byte
	if v {
		b = 1
	}
	buf[p] = b
	return p + 1
}
|
||||
|
||||
func WriteString(str string, buf []byte, p int) int {
|
||||
p = WriteUint(uint64(len(str)), buf, p)
|
||||
return p + copy(buf[p:], str)
|
||||
}
|
||||
|
||||
func WriteData(data []byte, buf []byte, p int) int {
|
||||
p = WriteUint(uint64(len(data)), buf, p)
|
||||
return p + copy(buf[p:], data)
|
||||
}
|
||||
|
||||
func WriteJson(v interface{}, buf []byte, p int) int {
|
||||
data, err := json.Marshal(v)
|
||||
if err != nil {
|
||||
log.Printf("JSON encoding error: %v", err)
|
||||
return WriteString("null", buf, p)
|
||||
}
|
||||
return WriteData(data, buf, p)
|
||||
}
|
||||
678
backend/pkg/messages/read_message.go
Normal file
678
backend/pkg/messages/read_message.go
Normal file
|
|
@ -0,0 +1,678 @@
|
|||
// Auto-generated, do not edit
|
||||
package messages
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
func ReadMessage(reader io.Reader) (Message, error) {
|
||||
t, err := ReadUint(reader);
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch t {
|
||||
|
||||
case 80:
|
||||
msg := &BatchMeta{ meta: &meta{ TypeID: 80} }
|
||||
if msg.PageNo, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.FirstIndex, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadInt(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 0:
|
||||
msg := &Timestamp{ meta: &meta{ TypeID: 0} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 1:
|
||||
msg := &SessionStart{ meta: &meta{ TypeID: 1} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ProjectID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TrackerVersion, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.RevID, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserUUID, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserAgent, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserOS, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserOSVersion, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserBrowser, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserBrowserVersion, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserDevice, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserDeviceType, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserDeviceMemorySize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.UserDeviceHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.UserCountry, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 3:
|
||||
msg := &SessionEnd{ meta: &meta{ TypeID: 3} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 4:
|
||||
msg := &SetPageLocation{ meta: &meta{ TypeID: 4} }
|
||||
if msg.URL, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Referrer, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.NavigationStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 5:
|
||||
msg := &SetViewportSize{ meta: &meta{ TypeID: 5} }
|
||||
if msg.Width, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Height, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 6:
|
||||
msg := &SetViewportScroll{ meta: &meta{ TypeID: 6} }
|
||||
if msg.X, err = ReadInt(reader); err != nil { return nil, err }
|
||||
if msg.Y, err = ReadInt(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 7:
|
||||
msg := &CreateDocument{ meta: &meta{ TypeID: 7} }
|
||||
|
||||
return msg, nil
|
||||
|
||||
case 8:
|
||||
msg := &CreateElementNode{ meta: &meta{ TypeID: 8} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ParentID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.index, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Tag, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.SVG, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 9:
|
||||
msg := &CreateTextNode{ meta: &meta{ TypeID: 9} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ParentID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Index, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 10:
|
||||
msg := &MoveNode{ meta: &meta{ TypeID: 10} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ParentID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Index, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 11:
|
||||
msg := &RemoveNode{ meta: &meta{ TypeID: 11} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 12:
|
||||
msg := &SetNodeAttribute{ meta: &meta{ TypeID: 12} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 13:
|
||||
msg := &RemoveNodeAttribute{ meta: &meta{ TypeID: 13} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 14:
|
||||
msg := &SetNodeData{ meta: &meta{ TypeID: 14} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Data, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 15:
|
||||
msg := &SetCSSData{ meta: &meta{ TypeID: 15} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Data, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 16:
|
||||
msg := &SetNodeScroll{ meta: &meta{ TypeID: 16} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.X, err = ReadInt(reader); err != nil { return nil, err }
|
||||
if msg.Y, err = ReadInt(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 17:
|
||||
msg := &SetInputTarget{ meta: &meta{ TypeID: 17} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Label, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 18:
|
||||
msg := &SetInputValue{ meta: &meta{ TypeID: 18} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Mask, err = ReadInt(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 19:
|
||||
msg := &SetInputChecked{ meta: &meta{ TypeID: 19} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Checked, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 20:
|
||||
msg := &MouseMove{ meta: &meta{ TypeID: 20} }
|
||||
if msg.X, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Y, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 21:
|
||||
msg := &MouseClick{ meta: &meta{ TypeID: 21} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.HesitationTime, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Label, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 22:
|
||||
msg := &ConsoleLog{ meta: &meta{ TypeID: 22} }
|
||||
if msg.Level, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 23:
|
||||
msg := &PageLoadTiming{ meta: &meta{ TypeID: 23} }
|
||||
if msg.RequestStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ResponseStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ResponseEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.DomContentLoadedEventStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.DomContentLoadedEventEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.LoadEventStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.LoadEventEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.FirstPaint, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.FirstContentfulPaint, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 24:
|
||||
msg := &PageRenderTiming{ meta: &meta{ TypeID: 24} }
|
||||
if msg.SpeedIndex, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.VisuallyComplete, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TimeToInteractive, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 25:
|
||||
msg := &JSException{ meta: &meta{ TypeID: 25} }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Message, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 26:
|
||||
msg := &RawErrorEvent{ meta: &meta{ TypeID: 26} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Source, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Message, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 27:
|
||||
msg := &RawCustomEvent{ meta: &meta{ TypeID: 27} }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 28:
|
||||
msg := &UserID{ meta: &meta{ TypeID: 28} }
|
||||
if msg.ID, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 29:
|
||||
msg := &UserAnonymousID{ meta: &meta{ TypeID: 29} }
|
||||
if msg.ID, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 30:
|
||||
msg := &Metadata{ meta: &meta{ TypeID: 30} }
|
||||
if msg.Key, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 31:
|
||||
msg := &PageEvent{ meta: &meta{ TypeID: 31} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.URL, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Referrer, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Loaded, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
if msg.RequestStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ResponseStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ResponseEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.DomContentLoadedEventStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.DomContentLoadedEventEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.LoadEventStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.LoadEventEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.FirstPaint, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.FirstContentfulPaint, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.SpeedIndex, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.VisuallyComplete, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TimeToInteractive, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 32:
|
||||
msg := &InputEvent{ meta: &meta{ TypeID: 32} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ValueMasked, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
if msg.Label, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 33:
|
||||
msg := &ClickEvent{ meta: &meta{ TypeID: 33} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.HesitationTime, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Label, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 34:
|
||||
msg := &ErrorEvent{ meta: &meta{ TypeID: 34} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Source, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Message, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 35:
|
||||
msg := &ResourceEvent{ meta: &meta{ TypeID: 35} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TTFB, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.HeaderSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.EncodedBodySize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.DecodedBodySize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.URL, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Success, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
if msg.Method, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Status, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 36:
|
||||
msg := &CustomEvent{ meta: &meta{ TypeID: 36} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 37:
|
||||
msg := &CSSInsertRule{ meta: &meta{ TypeID: 37} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Rule, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Index, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 38:
|
||||
msg := &CSSDeleteRule{ meta: &meta{ TypeID: 38} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Index, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 39:
|
||||
msg := &Fetch{ meta: &meta{ TypeID: 39} }
|
||||
if msg.Method, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.URL, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Request, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Response, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Status, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 40:
|
||||
msg := &Profiler{ meta: &meta{ TypeID: 40} }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Args, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Result, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 41:
|
||||
msg := &OTable{ meta: &meta{ TypeID: 41} }
|
||||
if msg.Key, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 42:
|
||||
msg := &StateAction{ meta: &meta{ TypeID: 42} }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 43:
|
||||
msg := &StateActionEvent{ meta: &meta{ TypeID: 43} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 44:
|
||||
msg := &Redux{ meta: &meta{ TypeID: 44} }
|
||||
if msg.Action, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.State, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 45:
|
||||
msg := &Vuex{ meta: &meta{ TypeID: 45} }
|
||||
if msg.Mutation, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.State, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 46:
|
||||
msg := &MobX{ meta: &meta{ TypeID: 46} }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 47:
|
||||
msg := &NgRx{ meta: &meta{ TypeID: 47} }
|
||||
if msg.Action, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.State, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 48:
|
||||
msg := &GraphQL{ meta: &meta{ TypeID: 48} }
|
||||
if msg.OperationKind, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.OperationName, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Variables, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Response, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 49:
|
||||
msg := &PerformanceTrack{ meta: &meta{ TypeID: 49} }
|
||||
if msg.Frames, err = ReadInt(reader); err != nil { return nil, err }
|
||||
if msg.Ticks, err = ReadInt(reader); err != nil { return nil, err }
|
||||
if msg.TotalJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.UsedJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 50:
|
||||
msg := &GraphQLEvent{ meta: &meta{ TypeID: 50} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 52:
|
||||
msg := &DOMDrop{ meta: &meta{ TypeID: 52} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 53:
|
||||
msg := &ResourceTiming{ meta: &meta{ TypeID: 53} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TTFB, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.HeaderSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.EncodedBodySize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.DecodedBodySize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.URL, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Initiator, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 54:
|
||||
msg := &ConnectionInformation{ meta: &meta{ TypeID: 54} }
|
||||
if msg.Downlink, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 55:
|
||||
msg := &SetPageVisibility{ meta: &meta{ TypeID: 55} }
|
||||
if msg.hidden, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 56:
|
||||
msg := &PerformanceTrackAggr{ meta: &meta{ TypeID: 56} }
|
||||
if msg.TimestampStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TimestampEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinFPS, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgFPS, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxFPS, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinCPU, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgCPU, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxCPU, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinTotalJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgTotalJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxTotalJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinUsedJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgUsedJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxUsedJSHeapSize, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 59:
|
||||
msg := &LongTask{ meta: &meta{ TypeID: 59} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Context, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ContainerType, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ContainerSrc, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ContainerId, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ContainerName, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 60:
|
||||
msg := &SetNodeAttributeURLBased{ meta: &meta{ TypeID: 60} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.BaseURL, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 61:
|
||||
msg := &SetCSSDataURLBased{ meta: &meta{ TypeID: 61} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Data, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.BaseURL, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 62:
|
||||
msg := &IssueEvent{ meta: &meta{ TypeID: 62} }
|
||||
if msg.MessageID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ContextString, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Context, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 63:
|
||||
msg := &TechnicalInfo{ meta: &meta{ TypeID: 63} }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 64:
|
||||
msg := &CustomIssue{ meta: &meta{ TypeID: 64} }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 65:
|
||||
msg := &PageClose{ meta: &meta{ TypeID: 65} }
|
||||
|
||||
return msg, nil
|
||||
|
||||
case 66:
|
||||
msg := &AssetCache{ meta: &meta{ TypeID: 66} }
|
||||
if msg.URL, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 67:
|
||||
msg := &CSSInsertRuleURLBased{ meta: &meta{ TypeID: 67} }
|
||||
if msg.ID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Rule, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Index, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.BaseURL, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 90:
|
||||
msg := &IOSSessionStart{ meta: &meta{ TypeID: 90} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.ProjectID, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TrackerVersion, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.RevID, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserUUID, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserOS, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserOSVersion, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserDevice, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserDeviceType, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.UserCountry, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 91:
|
||||
msg := &IOSSessionEnd{ meta: &meta{ TypeID: 91} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 92:
|
||||
msg := &IOSMetadata{ meta: &meta{ TypeID: 92} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Key, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 93:
|
||||
msg := &IOSCustomEvent{ meta: &meta{ TypeID: 93} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 94:
|
||||
msg := &IOSUserID{ meta: &meta{ TypeID: 94} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 95:
|
||||
msg := &IOSUserAnonymousID{ meta: &meta{ TypeID: 95} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 96:
|
||||
msg := &IOSScreenChanges{ meta: &meta{ TypeID: 96} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.SkipData, err = ReadData(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 97:
|
||||
msg := &IOSCrash{ meta: &meta{ TypeID: 97} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Reason, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Stacktrace, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 98:
|
||||
msg := &IOSScreenEnter{ meta: &meta{ TypeID: 98} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Title, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ViewName, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 99:
|
||||
msg := &IOSScreenLeave{ meta: &meta{ TypeID: 99} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Title, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ViewName, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 100:
|
||||
msg := &IOSClickEvent{ meta: &meta{ TypeID: 100} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Label, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.X, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Y, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 101:
|
||||
msg := &IOSInputEvent{ meta: &meta{ TypeID: 101} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ValueMasked, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
if msg.Label, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 102:
|
||||
msg := &IOSPerformanceEvent{ meta: &meta{ TypeID: 102} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Name, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Value, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 103:
|
||||
msg := &IOSLog{ meta: &meta{ TypeID: 103} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Severity, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Content, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 104:
|
||||
msg := &IOSInternalError{ meta: &meta{ TypeID: 104} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Content, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 105:
|
||||
msg := &IOSNetworkCall{ meta: &meta{ TypeID: 105} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Length, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Duration, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Headers, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Body, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.URL, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Success, err = ReadBoolean(reader); err != nil { return nil, err }
|
||||
if msg.Method, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Status, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 110:
|
||||
msg := &IOSPerformanceAggregated{ meta: &meta{ TypeID: 110} }
|
||||
if msg.TimestampStart, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.TimestampEnd, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinFPS, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgFPS, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxFPS, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinCPU, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgCPU, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxCPU, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinMemory, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgMemory, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxMemory, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MinBattery, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.AvgBattery, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.MaxBattery, err = ReadUint(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
case 111:
|
||||
msg := &IOSIssueEvent{ meta: &meta{ TypeID: 111} }
|
||||
if msg.Timestamp, err = ReadUint(reader); err != nil { return nil, err }
|
||||
if msg.Type, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.ContextString, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Context, err = ReadString(reader); err != nil { return nil, err }
|
||||
if msg.Payload, err = ReadString(reader); err != nil { return nil, err }
|
||||
return msg, nil
|
||||
|
||||
}
|
||||
return nil, fmt.Errorf("Unknown message code: %v", t)
|
||||
}
|
||||
15
backend/pkg/queue/import.go
Normal file
15
backend/pkg/queue/import.go
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
package queue
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/redisstream"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
)
|
||||
|
||||
// NewConsumer returns a queue consumer for the given consumer group and
// topics. It delegates to the Redis Streams implementation; this level of
// indirection lets the queue backend be swapped without touching callers.
func NewConsumer(group string, topics []string, handler types.MessageHandler) types.Consumer {
	return redisstream.NewConsumer(group, topics, handler)
}

// NewProducer returns a queue producer backed by Redis Streams.
func NewProducer() types.Producer {
	return redisstream.NewProducer()
}
|
||||
|
||||
19
backend/pkg/queue/messages.go
Normal file
19
backend/pkg/queue/messages.go
Normal file
|
|
@ -0,0 +1,19 @@
|
|||
package queue
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
)
|
||||
|
||||
|
||||
func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler) types.Consumer {
|
||||
return NewConsumer(group, topics, func(sessionID uint64, value []byte, meta *types.Meta) {
|
||||
if err := messages.ReadBatch(value, func(msg messages.Message) {
|
||||
handler(sessionID, msg, meta)
|
||||
}); err != nil {
|
||||
log.Printf("Decode error: %v\n", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
31
backend/pkg/queue/types/types.go
Normal file
31
backend/pkg/queue/types/types.go
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
// Consumer reads messages from one or more queue topics on behalf of a
// consumer group.
type Consumer interface {
	// ConsumeNext waits briefly for the next batch of messages and
	// dispatches them to the registered handler; a timeout is not an error.
	ConsumeNext() error
	// DisableAutoCommit switches the consumer to manual acknowledgement
	// via Commit/CommitBack.
	DisableAutoCommit()
	// Commit acknowledges every message delivered so far.
	Commit() error
	// CommitBack acknowledges only messages whose timestamp is at least
	// `gap` older than the newest message seen (same units as Meta.Timestamp).
	CommitBack(gap int64) error
	// Close releases consumer resources.
	Close()
}


// Producer publishes messages to queue topics.
type Producer interface {
	// Produce sends value to topic, keyed by key (used for partitioning).
	Produce(topic string, key uint64, value []byte) error
	// Close shuts the producer down, waiting up to timeout (implementation-defined units).
	Close(timeout int)
	// Flush waits up to timeout for buffered messages to be delivered.
	Flush(timeout int)
}
|
||||
|
||||
|
||||
// Meta carries queue-level metadata delivered alongside each message.
type Meta struct {
	ID        uint64 // message ID derived from the broker entry ID (timestamp<<13 | sequence)
	Topic     string // topic/stream the message was read from
	Timestamp int64  // broker-assigned timestamp (Redis stream IDs use milliseconds)
}

// MessageHandler consumes a raw message: (sessionID, payload, meta).
type MessageHandler func(uint64, []byte, *Meta)

// DecodedMessageHandler consumes an already-decoded message: (sessionID, message, meta).
type DecodedMessageHandler func(uint64, messages.Message, *Meta)
|
||||
|
||||
170
backend/pkg/redisstream/consumer.go
Normal file
170
backend/pkg/redisstream/consumer.go
Normal file
|
|
@ -0,0 +1,170 @@
|
|||
package redisstream
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
_redis "github.com/go-redis/redis"
|
||||
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
)
|
||||
|
||||
|
||||
|
||||
// idsInfo accumulates the not-yet-acknowledged entry IDs of one stream
// together with their timestamps (parallel slices: same index, same entry).
type idsInfo struct{
	id []string
	ts []int64
}
// streamPendingIDsMap maps a stream name to its pending (unacked) entries.
type streamPendingIDsMap map[string]*idsInfo
|
||||
|
||||
// Consumer reads messages from Redis Streams on behalf of a consumer group
// and tracks delivered-but-unacknowledged entries for manual commits.
type Consumer struct {
	redis          *_redis.Client
	streams        []string // stream names followed by an equal number of ">" IDs, as XREADGROUP expects
	group          string   // consumer group name (also reused as the consumer name)
	messageHandler types.MessageHandler
	idsPending     streamPendingIDsMap // unacked entries per stream (manual-commit mode only)
	lastTs         int64               // stream timestamp (ms) of the most recently delivered entry
	autoCommit     bool                // when true, entries are XACKed right after the handler returns
}
|
||||
|
||||
// NewConsumer creates a Redis Streams consumer for the given group over the
// given streams, creating each stream and consumer group if needed
// (XGROUP CREATE MKSTREAM). Auto-commit is enabled by default.
// The process exits on any group-creation error other than "group already exists".
func NewConsumer(group string, streams []string, messageHandler types.MessageHandler) *Consumer {
	redis := getRedisClient()
	for _, stream := range streams {
		err := redis.XGroupCreateMkStream(stream, group, "0").Err()
		// BUSYGROUP just means the group already exists; that is fine.
		if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
			log.Fatalln(err)
		}
	}


	idsPending := make(streamPendingIDsMap)

	// XREADGROUP expects the Streams argument as N stream names followed by
	// N IDs, so append one ID per stream after the names.
	streamsCount := len(streams)
	for i := 0; i < streamsCount; i++ {
		// ">" requests never-delivered messages.
		// Otherwise - never acknowledged only.
		// TODO: understand why in case of "0" it eats 100% cpu
		streams = append(streams, ">")

		idsPending[streams[i]] = new(idsInfo)
	}

	return &Consumer{
		redis: redis,
		messageHandler: messageHandler,
		streams: streams,
		group: group,
		autoCommit: true,
		idsPending: idsPending,
	}
}
|
||||
|
||||
const READ_COUNT = 10
|
||||
|
||||
func (c *Consumer) ConsumeNext() error {
|
||||
// MBTODO: read in go routine, send messages to channel
|
||||
res, err := c.redis.XReadGroup(&_redis.XReadGroupArgs{
|
||||
Group: c.group,
|
||||
Consumer: c.group,
|
||||
Streams: c.streams,
|
||||
Count: int64(READ_COUNT),
|
||||
Block: 200 * time.Millisecond,
|
||||
}).Result()
|
||||
if err != nil {
|
||||
if err, ok := err.(net.Error); ok && err.Timeout() {
|
||||
return nil
|
||||
}
|
||||
if err == _redis.Nil {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
for _, r := range res {
|
||||
for _, m := range r.Messages {
|
||||
sessionIDString, ok := m.Values["sessionID"].(string)
|
||||
if !ok {
|
||||
return errors.Errorf("Can not cast value for messageID %v", m.ID)
|
||||
}
|
||||
sessionID, err := strconv.ParseUint(sessionIDString, 10, 64)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "Can not parse sessionID '%v' for messageID %v", sessionID, m.ID)
|
||||
}
|
||||
valueString, ok := m.Values["value"].(string)
|
||||
if !ok {
|
||||
return errors.Errorf("Can not cast value for messageID %v", m.ID)
|
||||
}
|
||||
// assumming that ID has a correct format
|
||||
idParts := strings.Split(m.ID, "-")
|
||||
ts, _ := strconv.ParseUint(idParts[0], 10, 64)
|
||||
idx, _ := strconv.ParseUint(idParts[1], 10, 64)
|
||||
if idx > 0x1FFF {
|
||||
return errors.New("Too many messages per ms in redis")
|
||||
}
|
||||
c.messageHandler(sessionID, []byte(valueString), &types.Meta{
|
||||
Topic: r.Stream,
|
||||
Timestamp: int64(ts),
|
||||
ID: ts << 13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years
|
||||
})
|
||||
if c.autoCommit {
|
||||
if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil {
|
||||
return errors.Wrapf(err, "Acknoledgment error for messageID %v", m.ID)
|
||||
}
|
||||
} else {
|
||||
c.lastTs = int64(ts)
|
||||
c.idsPending[r.Stream].id = append(c.idsPending[r.Stream].id, m.ID)
|
||||
c.idsPending[r.Stream].ts = append(c.idsPending[r.Stream].ts, int64(ts))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Consumer) Commit() error {
|
||||
for stream, idsInfo := range c.idsPending {
|
||||
if len(idsInfo.id) == 0 {
|
||||
continue
|
||||
}
|
||||
if err := c.redis.XAck(stream, c.group, idsInfo.id...).Err(); err != nil {
|
||||
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
|
||||
}
|
||||
c.idsPending[stream].id = nil
|
||||
c.idsPending[stream].ts = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Consumer) CommitBack(gap int64) error {
|
||||
if c.lastTs == 0 {
|
||||
return nil
|
||||
}
|
||||
maxTs := c.lastTs - gap
|
||||
|
||||
for stream, idsInfo := range c.idsPending {
|
||||
if len(idsInfo.id) == 0 {
|
||||
continue
|
||||
}
|
||||
maxI := sort.Search(len(idsInfo.ts), func(i int) bool {
|
||||
return idsInfo.ts[i] > maxTs
|
||||
})
|
||||
if err := c.redis.XAck(stream, c.group, idsInfo.id[:maxI]...).Err(); err != nil {
|
||||
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
|
||||
}
|
||||
c.idsPending[stream].id = idsInfo.id[maxI:]
|
||||
c.idsPending[stream].ts = idsInfo.ts[maxI:]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DisableAutoCommit is meant to switch the consumer into manual
// acknowledgement mode (Commit/CommitBack).
// NOTE(review): the assignment is deliberately commented out, so auto-commit
// currently remains enabled no matter what — confirm before relying on
// manual commits (possibly related to the ">"-only read TODO in NewConsumer).
func (c *Consumer) DisableAutoCommit() {
	//c.autoCommit = false
}

// Close implements types.Consumer. The Redis client is a shared package-wide
// singleton, so there is nothing to release per consumer.
func (c *Consumer) Close() {
	// noop
}
|
||||
44
backend/pkg/redisstream/producer.go
Normal file
44
backend/pkg/redisstream/producer.go
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
package redisstream
|
||||
|
||||
import (
|
||||
"github.com/go-redis/redis"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
)
|
||||
|
||||
|
||||
// Producer publishes messages to Redis Streams.
type Producer struct {
	redis        *redis.Client
	maxLenApprox int64 // approximate per-stream length cap, applied via XADD MAXLEN ~
}

// NewProducer builds a Producer on the shared Redis client; the stream
// length cap comes from the REDIS_STREAMS_MAX_LEN environment variable.
func NewProducer() *Producer {
	return &Producer{
		redis: getRedisClient(),
		maxLenApprox: int64(env.Uint64("REDIS_STREAMS_MAX_LEN")),
	}
}
|
||||
|
||||
func (p *Producer) Produce(topic string, key uint64, value []byte) error {
|
||||
args := &redis.XAddArgs{
|
||||
Stream: topic,
|
||||
Values: map[string]interface{}{
|
||||
"sessionID": key,
|
||||
"value": value,
|
||||
},
|
||||
}
|
||||
args.MaxLenApprox = p.maxLenApprox
|
||||
|
||||
_, err := p.redis.XAdd(args).Result()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close implements types.Producer. XAdd is synchronous and the client is a
// shared singleton, so there is nothing to shut down; the timeout is ignored.
func (p *Producer) Close(_ int) {
	// noop
}
// Flush implements types.Producer. Nothing is buffered, so nothing to flush.
func (p *Producer) Flush(_ int) {
	// noop
}
|
||||
26
backend/pkg/redisstream/redis.go
Normal file
26
backend/pkg/redisstream/redis.go
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
package redisstream
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/go-redis/redis"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
)
|
||||
|
||||
|
||||
// redisClient is the lazily-initialized, package-wide shared client.
// NOTE(review): initialization is not goroutine-safe; acceptable only if the
// first call happens during single-threaded startup — confirm at call sites.
var redisClient *redis.Client


// getRedisClient returns the shared Redis client, creating it on first use
// from the REDIS_STRING environment variable. The process exits if the
// initial PING fails, so callers always receive a verified connection.
func getRedisClient() *redis.Client {
	if redisClient != nil {
		return redisClient
	}
	redisClient = redis.NewClient(&redis.Options{
		Addr: env.String("REDIS_STRING"),
	})
	if _, err := redisClient.Ping().Result(); err != nil {
		log.Fatalln(err)
	}
	return redisClient
}
|
||||
98
backend/pkg/storage/s3.go
Normal file
98
backend/pkg/storage/s3.go
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strconv"
|
||||
"sort"
|
||||
|
||||
_s3 "github.com/aws/aws-sdk-go/service/s3"
|
||||
"github.com/aws/aws-sdk-go/service/s3/s3manager"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
)
|
||||
|
||||
// S3 bundles the AWS S3 client and upload manager for a single bucket.
type S3 struct {
	uploader *s3manager.Uploader
	svc      *_s3.S3
	bucket   *string // target bucket, stored as a pointer for the AWS SDK input structs
}


// NewS3 creates an S3 handle for the given region and bucket.
func NewS3(region string, bucket string) *S3 {
	sess := env.AWSSessionOnRegion(region)
	return &S3{
		uploader: s3manager.NewUploader(sess),
		svc: _s3.New(sess), // AWS Docs: "These clients are safe to use concurrently."
		bucket: &bucket,
	}
}
|
||||
|
||||
func (s3 *S3) Upload(reader io.Reader, key string, contentType string, gzipped bool) error {
|
||||
cacheControl := "max-age=2628000, immutable, private"
|
||||
var contentEncoding *string
|
||||
if gzipped {
|
||||
gzipStr := "gzip"
|
||||
contentEncoding = &gzipStr
|
||||
}
|
||||
_, err := s3.uploader.Upload(&s3manager.UploadInput{
|
||||
Body: reader,
|
||||
Bucket: s3.bucket,
|
||||
Key: &key,
|
||||
ContentType: &contentType,
|
||||
CacheControl: &cacheControl,
|
||||
ContentEncoding: contentEncoding,
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (s3 *S3) Get(key string) (io.ReadCloser, error) {
|
||||
out, err := s3.svc.GetObject(&_s3.GetObjectInput{
|
||||
Bucket: s3.bucket,
|
||||
Key: &key,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out.Body, nil
|
||||
}
|
||||
|
||||
func (s3 *S3) Exists(key string) bool {
|
||||
_, err := s3.svc.HeadObject(&_s3.HeadObjectInput{
|
||||
Bucket: s3.bucket,
|
||||
Key: &key,
|
||||
})
|
||||
if err == nil {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
|
||||
const MAX_RETURNING_COUNT = 40
|
||||
func (s3 *S3) GetFrequentlyUsedKeys(projectID uint64) ([]string, error) {
|
||||
prefix := strconv.FormatUint(projectID, 10) + "/"
|
||||
output, err := s3.svc.ListObjectsV2(&_s3.ListObjectsV2Input{
|
||||
Bucket: s3.bucket,
|
||||
Prefix: &prefix,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//pagination may be here
|
||||
|
||||
list := output.Contents
|
||||
max := len(list)
|
||||
if (max > MAX_RETURNING_COUNT) {
|
||||
max = MAX_RETURNING_COUNT
|
||||
sort.Slice(list, func(i, j int) bool {
|
||||
return list[i].LastModified.After(*(list[j].LastModified))
|
||||
})
|
||||
}
|
||||
|
||||
var keyList []string
|
||||
s := len(prefix)
|
||||
for _, obj := range list[:max] {
|
||||
keyList = append(keyList, (*obj.Key)[s:])
|
||||
}
|
||||
return keyList, nil
|
||||
}
|
||||
18
backend/pkg/token/http.go
Normal file
18
backend/pkg/token/http.go
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
package token
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const BEARER_SCHEMA = "Bearer "
|
||||
|
||||
func (tokenizer *Tokenizer) ParseFromHTTPRequest(r *http.Request) (*TokenData, error) {
|
||||
header := r.Header.Get("Authorization")
|
||||
if !strings.HasPrefix(header, BEARER_SCHEMA) {
|
||||
return nil, errors.New("Missing token")
|
||||
}
|
||||
token := header[len(BEARER_SCHEMA):]
|
||||
return tokenizer.Parse(token)
|
||||
}
|
||||
65
backend/pkg/token/tokenizer.go
Normal file
65
backend/pkg/token/tokenizer.go
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
package token
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/btcsuite/btcutil/base58"
|
||||
)
|
||||
|
||||
// EXPIRED is returned by Parse (together with the decoded payload) when the
// token signature is valid but its expiration time has passed.
var EXPIRED = errors.New("token expired")

// Tokenizer signs and verifies compact session tokens using HMAC-SHA256.
type Tokenizer struct {
	secret []byte // HMAC key
}

// NewTokenizer builds a Tokenizer from the given shared secret.
func NewTokenizer(secret string) *Tokenizer {
	return &Tokenizer{[]byte(secret)}
}

// TokenData is the payload carried inside a token.
type TokenData struct {
	ID      uint64 // session/entity identifier
	ExpTime int64  // expiration time, Unix milliseconds (see Parse's comparison)
}
|
||||
|
||||
// sign computes the HMAC-SHA256 of body using the tokenizer's secret.
func (tokenizer *Tokenizer) sign(body string) []byte {
	mac := hmac.New(sha256.New, tokenizer.secret)
	mac.Write([]byte(body)) // hash.Hash.Write never returns an error
	return mac.Sum(nil)
}
|
||||
|
||||
func (tokenizer *Tokenizer) Compose(d TokenData) string {
|
||||
body := strconv.FormatUint(d.ID, 36) +
|
||||
"." + strconv.FormatInt(d.ExpTime, 36)
|
||||
sign := base58.Encode(tokenizer.sign(body))
|
||||
return body + "." + sign
|
||||
}
|
||||
|
||||
// Parse validates a token produced by Compose ("<id36>.<exp36>.<base58 sig>")
// and returns its payload. When the signature is valid but the token is past
// ExpTime, it returns the decoded data together with the EXPIRED sentinel.
func (tokenizer *Tokenizer) Parse(token string) (*TokenData, error) {
	data := strings.Split(token, ".")
	if len(data) != 3 {
		return nil, errors.New("wrong token format")
	}
	// hmac.Equal is a constant-time comparison, which avoids leaking the
	// signature through timing differences.
	if !hmac.Equal(
		base58.Decode(data[len(data)-1]),
		tokenizer.sign(data[0]+"."+data[1]),
	) {
		return nil, errors.New("wrong token sign")
	}
	id, err := strconv.ParseUint(data[0], 36, 64)
	if err != nil {
		return nil, err
	}
	expTime, err := strconv.ParseInt(data[1], 36, 64)
	if err != nil {
		return nil, err
	}
	// Compare against the current time in Unix milliseconds.
	if expTime <= time.Now().UnixNano()/1e6 {
		return &TokenData{id,expTime}, EXPIRED
	}
	return &TokenData{id,expTime}, nil
}
|
||||
63
backend/pkg/url/assets/css.go
Normal file
63
backend/pkg/url/assets/css.go
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
package assets
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// TODO: ignore data: , escaped quotes , spaces between brackets?
var cssURLs = regexp.MustCompile(`url\(("[^"]*"|'[^']*'|[^)]*)\)`)
var cssImports = regexp.MustCompile(`@import "(.*?)"`)

// cssUrlsIndex returns the [start, end) index pairs of every URL reference
// (url(...) argument or @import "..." path) found in css, sorted by start
// position in DESCENDING order so callers can splice replacements in place
// without invalidating the remaining indexes.
func cssUrlsIndex(css string) [][]int {
	var spans [][]int
	collect := func(re *regexp.Regexp) {
		for _, m := range re.FindAllStringSubmatchIndex(css, -1) {
			// m[0:2] is the full match; m[2:] is the captured group's span.
			spans = append(spans, m[2:])
		}
	}
	collect(cssURLs)
	collect(cssImports)
	sort.Slice(spans, func(a, b int) bool {
		return spans[a][0] > spans[b][0]
	})
	return spans
}
|
||||
|
||||
// unquote trims whitespace from str and strips one matching pair of
// surrounding quotes, returning the inner content and the quote character
// that was removed ("" when str was not quoted).
func unquote(str string) (string, string) {
	str = strings.TrimSpace(str)
	// A quoted value needs at least the two quote characters themselves.
	// Using `< 2` (the original `<= 2` returned two-character strings
	// unchanged) lets the empty quoted strings `""` and `''` unquote to "".
	if len(str) < 2 {
		return str, ""
	}
	if str[0] == '"' && str[len(str)-1] == '"' {
		return str[1 : len(str)-1], "\""
	}
	if str[0] == '\'' && str[len(str)-1] == '\'' {
		return str[1 : len(str)-1], "'"
	}
	return str, ""
}
|
||||
|
||||
func ExtractURLsFromCSS(css string) []string {
|
||||
indexes := cssUrlsIndex(css)
|
||||
urls := make([]string, len(indexes))
|
||||
for _, idx := range indexes {
|
||||
|
||||
f := idx[0]
|
||||
t := idx[1]
|
||||
rawurl, _ := unquote(css[f:t])
|
||||
urls = append(urls, rawurl)
|
||||
}
|
||||
return urls
|
||||
}
|
||||
|
||||
// RewriteCSS replaces every URL reference inside css with its cached-asset
// URL for the given session. cssUrlsIndex returns spans sorted by start
// position in DESCENDING order, so each in-place splice leaves the indexes
// of the not-yet-processed (earlier) spans valid.
// It also rewrites ":hover" pseudo-class selectors to the ".-asayer-hover"
// class, so replay code can toggle hover styles programmatically.
func (r *Rewriter) RewriteCSS(sessionID uint64, baseurl string, css string) string {
	for _, idx := range cssUrlsIndex(css) {
		f := idx[0]
		t := idx[1]
		rawurl, q := unquote(css[f:t])
		// why exactly quote back?
		css = css[:f] + q + r.RewriteURL(sessionID, baseurl, rawurl) + q + css[t:]
	}
	return strings.Replace(css, ":hover", ".-asayer-hover", -1)
}
|
||||
21
backend/pkg/url/assets/rewriter.go
Normal file
21
backend/pkg/url/assets/rewriter.go
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
package assets
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// Rewriter rewrites asset URLs so they point at the assets cache origin.
type Rewriter struct {
	assetsURL *url.URL // parsed base origin of the assets cache
}

// NewRewriter parses baseOrigin into the assets origin URL. The process
// exits if baseOrigin is not parseable, since every rewrite depends on it.
func NewRewriter(baseOrigin string) *Rewriter {
	assetsURL, err := url.Parse(baseOrigin)
	if err != nil {
		log.Fatal(err)
	}
	return &Rewriter{
		assetsURL: assetsURL,
	}

}
|
||||
98
backend/pkg/url/assets/url.go
Normal file
98
backend/pkg/url/assets/url.go
Normal file
|
|
@ -0,0 +1,98 @@
|
|||
package assets
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// getSessionKey derives a coarse time bucket from a session ID by keeping
// only its top 14 bits (the timestamp portion, which changes roughly once
// per week). Check out utils/flacker for the sessionID layout.
func getSessionKey(sessionID uint64) string {
	bucket := sessionID >> 50
	return strconv.FormatUint(bucket, 10)
}
|
||||
|
||||
// ResolveURL resolves rawurl against baseurl (which must be an absolute
// URL). If either fails to parse, rawurl is returned unchanged.
func ResolveURL(baseurl string, rawurl string) string {
	base, _ := url.ParseRequestURI(baseurl) // this parser is only for absolute/base URLs
	ref, _ := url.Parse(rawurl)             // TODO: handle errors ?
	if base == nil || ref == nil {
		return rawurl
	}
	// ResolveReference is equivalent to base.Parse(rawurl).
	return base.ResolveReference(ref).String()
}
|
||||
|
||||
// isRelativeCachable rejects references that cannot name a fetchable asset:
// empty strings and pure fragment references.
func isRelativeCachable(relativeURL string) bool {
	return len(relativeURL) > 0 && relativeURL[0] != '#'
}

// isCachable reports whether rawurl points at an asset worth caching:
// an http(s) URL without userinfo whose path has a stylesheet or font
// extension.
func isCachable(rawurl string) bool {
	u, _ := url.Parse(rawurl)
	if u == nil || u.User != nil {
		return false
	}
	switch u.Scheme {
	case "http", "https":
		// supported schemes
	default:
		return false
	}
	switch filepath.Ext(u.Path) {
	case ".css", ".woff", ".woff2", ".ttf", ".otf":
		return true
	}
	return false
}
|
||||
|
||||
func GetFullCachableURL(baseURL string, relativeURL string) (string, bool) {
|
||||
if !isRelativeCachable(relativeURL) {
|
||||
return "", false
|
||||
}
|
||||
return ResolveURL(baseURL, relativeURL), true
|
||||
}
|
||||
|
||||
|
||||
// ASAYER_QUERY_START marks where the (path-escaped) query string begins
// inside a cache path, since '?' cannot survive as-is in object keys.
const ASAYER_QUERY_START = "ASAYER_QUERY_ESCtRT"

// getCachePath maps a URL onto a cache path of the form
// /<scheme>/<host><path>[/<marker><escaped-query>].
func getCachePath(rawurl string) string {
	u, _ := url.Parse(rawurl)
	p := "/" + u.Scheme + "/" + u.Hostname() + u.Path
	if u.RawQuery == "" {
		return p
	}
	if p[len(p)-1] != '/' {
		p += "/"
	}
	return p + ASAYER_QUERY_START + url.PathEscape(u.RawQuery)
}
|
||||
|
||||
// getCachePathWithKey appends the session's weekly time bucket to the cache
// path, so session-scoped assets naturally expire when the bucket rolls over.
func getCachePathWithKey(sessionID uint64, rawurl string) string {
	return getCachePath(rawurl) + "." + getSessionKey(sessionID) // Be carefull with slashes
}

// GetCachePathForJS returns the session-independent cache path for a JS asset.
func GetCachePathForJS(rawurl string) string {
	return getCachePath(rawurl)
}

// GetCachePathForAssets returns the session-scoped cache path for a generic asset.
func GetCachePathForAssets(sessionID uint64, rawurl string) string {
	return getCachePathWithKey(sessionID, rawurl)
}
|
||||
|
||||
|
||||
// RewriteURL maps an asset reference found in session markup onto the
// configured assets origin. References that are not cachable (empty,
// fragment-only, non-http(s), carrying userinfo, or without an asset
// extension — see isRelativeCachable/isCachable) come back unchanged.
func (r *Rewriter) RewriteURL(sessionID uint64, baseURL string, relativeURL string) string {
	// TODO: put it in one check within GetFullCachableURL
	if !isRelativeCachable(relativeURL) {
		return relativeURL
	}
	fullURL := ResolveURL(baseURL, relativeURL)
	if !isCachable(fullURL) {
		return relativeURL
	}

	// Rebuild the URL on the assets origin, keyed by the session's time bucket.
	u := url.URL{
		Path: r.assetsURL.Path + getCachePathWithKey(sessionID, fullURL),
		Host: r.assetsURL.Host,
		Scheme: r.assetsURL.Scheme,
	}

	return u.String()
}
|
||||
|
||||
12
backend/pkg/url/method.go
Normal file
12
backend/pkg/url/method.go
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
package url
|
||||
|
||||
// METHODS lists the HTTP methods accepted by EnsureMethod.
var METHODS = []string{ "GET", "HEAD", "POST" , "PUT" , "DELETE" , "CONNECT" , "OPTIONS" , "TRACE" , "PATCH" }

// EnsureMethod returns method unchanged when it is a known HTTP method
// (exact, case-sensitive match against METHODS) and "" otherwise.
func EnsureMethod(method string) string {
	for _, known := range METHODS {
		if method == known {
			return method
		}
	}
	return ""
}
|
||||
18
backend/pkg/url/url.go
Normal file
18
backend/pkg/url/url.go
Normal file
|
|
@ -0,0 +1,18 @@
|
|||
package url
|
||||
|
||||
import (
|
||||
"strings"
|
||||
_url "net/url"
|
||||
)
|
||||
|
||||
// DiscardURLQuery returns url with everything from the first '?' removed.
func DiscardURLQuery(url string) string {
	if i := strings.IndexByte(url, '?'); i >= 0 {
		return url[:i]
	}
	return url
}

// GetURLParts splits a raw URL into its host and request-URI (path plus
// query) components.
func GetURLParts(rawURL string) (string, string, error) {
	parsed, err := _url.Parse(rawURL)
	if err != nil {
		return "", "", err
	}
	return parsed.Host, parsed.RequestURI(), nil
}
|
||||
11
backend/pkg/utime/utime.go
Normal file
11
backend/pkg/utime/utime.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
package utime
|
||||
|
||||
import "time"
|
||||
|
||||
func CurrentTimestamp() int64 {
|
||||
return time.Now().UnixNano() / 1e6
|
||||
}
|
||||
|
||||
func ToMilliseconds(t time.Time) int64 {
|
||||
return t.UnixNano() / 1e6
|
||||
}
|
||||
94
backend/services/alerts/main.go
Normal file
94
backend/services/alerts/main.go
Normal file
|
|
@ -0,0 +1,94 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/ClickHouse/clickhouse-go"
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
)
|
||||
|
||||
// main runs the alerts service: it loads all alerts from Postgres, listens
// for alert changes via a Postgres LISTEN/NOTIFY channel, and re-evaluates
// every cached alert against ClickHouse once per minute.
func main() {
	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
	POSTGRES_STRING := env.String("POSTGRES_STRING")
	CLICKHOUSE_STRING := env.String("CLICKHOUSE_STRING")
	NOTIFICATIONS_STRING := env.String("ALERT_NOTIFICATION_STRING")
	log.Printf("Notifications: %s \nCH: %s\n", NOTIFICATIONS_STRING, CLICKHOUSE_STRING)
	pg := postgres.NewConn(POSTGRES_STRING)
	defer pg.Close()

	ch, err := sql.Open("clickhouse", CLICKHOUSE_STRING)
	if err != nil {
		log.Fatal(err)
	}
	// Fail fast (with CH-specific diagnostics) when ClickHouse is unreachable.
	if err := ch.Ping(); err != nil {
		if exception, ok := err.(*clickhouse.Exception); ok {
			log.Printf("[%d] %s \n%s\n", exception.Code, exception.Message, exception.StackTrace)
		} else {
			log.Println(err)
		}
		return
	}

	// Prime the in-memory alert cache with every alert stored in Postgres.
	manager := NewManager(NOTIFICATIONS_STRING, ch, pg)
	if err := pg.IterateAlerts(func(a *postgres.Alert, err error) {
		if err != nil {
			log.Printf("Postgres error: %v\n", err)
			return
		}
		log.Printf("Alert initialization: %+v\n", *a)
		err = manager.Update(a)
		if err != nil {
			log.Printf("Alert parse error: %v | Alert: %+v\n", err, *a)
			return
		}
	}); err != nil {
		log.Fatalf("Postgres error: %v\n", err)
	}

	// Subscribe to live alert create/update/delete notifications.
	listener, err := postgres.NewAlertsListener(POSTGRES_STRING)
	if err != nil {
		log.Fatalf("Postgres listener error: %v\n", err)
	}
	defer listener.Close()

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	// Evaluate all alerts once per minute.
	tickAlert := time.Tick(1 * time.Minute)

	log.Printf("Alert service started\n")
	manager.RequestAll()
	for {
		select {
		case sig := <-sigchan:
			log.Printf("Caught signal %v: terminating\n", sig)
			listener.Close()
			pg.Close()
			os.Exit(0)
		case <-tickAlert:
			log.Printf("Requesting all...%d alerts\n", manager.Length())
			manager.RequestAll()
		case iPointer := <-listener.Alerts:
			// Live alert change: refresh (or delete) the cached copy.
			log.Printf("Alert update: %+v\n", *iPointer)
			err := manager.Update(iPointer)
			if err != nil {
				log.Printf("Alert parse error: %+v | Alert: %v\n", err, *iPointer)
			}
		case err := <-listener.Errors:
			log.Printf("listener error: %v\n", err)
			// A lost listener connection means missed alert updates;
			// crash so the orchestrator restarts the service cleanly.
			if err.Error() == "conn closed" {
				panic("Listener conn lost")
			}
		}
	}
}
|
||||
169
backend/services/alerts/manager.go
Normal file
169
backend/services/alerts/manager.go
Normal file
|
|
@ -0,0 +1,169 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
)
|
||||
|
||||
// CHParallelLimit caps the number of concurrent ClickHouse queries.
const CHParallelLimit = 2

// chCount counts CH queries issued during one RequestAll round (stats only;
// updated atomically from processAlert goroutines).
var chCount int64

// manager holds the in-memory alert set and the notifications accumulated
// during the current evaluation round.
type manager struct {
	notificationsUrl string                     // endpoint the notification batch is POSTed to
	alertsCache map[uint32]*postgres.Alert      // alerts keyed by AlertID; guarded by cacheMutex
	cacheMutex sync.Mutex                       // guards alertsCache
	chParallel chan bool                        // semaphore limiting concurrent CH queries to CHParallelLimit
	ch *sql.DB                                  // ClickHouse connection (safe for concurrent use)
	pg *postgres.Conn                           // Postgres connection
	pgMutex sync.Mutex                          // NOTE(review): currently unused in the visible methods
	notifications map[uint32]*postgres.TenantNotification // pending notifications keyed by AlertID; guarded by notificationsMutex
	notificationsGo *sync.WaitGroup             // tracks in-flight processAlert goroutines
	notificationsMutex sync.Mutex               // guards notifications
}
|
||||
|
||||
func NewManager(notificationsUrl string, ch *sql.DB, pg *postgres.Conn) *manager {
|
||||
return &manager{
|
||||
notificationsUrl: notificationsUrl,
|
||||
alertsCache: make(map[uint32]*postgres.Alert),
|
||||
cacheMutex: sync.Mutex{},
|
||||
chParallel: make(chan bool, CHParallelLimit),
|
||||
ch: ch,
|
||||
pg: pg,
|
||||
pgMutex: sync.Mutex{},
|
||||
notifications: make(map[uint32]*postgres.TenantNotification),
|
||||
notificationsGo: &sync.WaitGroup{},
|
||||
notificationsMutex: sync.Mutex{},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (m *manager) Length() int {
|
||||
return len(m.alertsCache)
|
||||
}
|
||||
|
||||
func (m *manager) Update(a *postgres.Alert) error {
|
||||
m.cacheMutex.Lock()
|
||||
defer m.cacheMutex.Unlock()
|
||||
_, exists := m.alertsCache[a.AlertID]
|
||||
if exists && a.DeletedAt != nil {
|
||||
log.Println("deleting alert from memory")
|
||||
delete(m.alertsCache, a.AlertID)
|
||||
return nil
|
||||
} else {
|
||||
m.alertsCache[a.AlertID] = a
|
||||
}
|
||||
return nil
|
||||
}
|
||||
// processAlert evaluates one alert against ClickHouse and, when the alert
// condition holds, queues a TenantNotification for the current round.
// It always signals the WaitGroup and releases its chParallel slot on
// return. Runs on its own goroutine (spawned by RequestAll).
func (m *manager) processAlert(a *postgres.Alert) {
	defer func() {
		defer m.notificationsGo.Done()
		<-m.chParallel // release the CH concurrency slot
	}()
	// Skip alerts that are throttled by their own check schedule.
	if !a.CanCheck() {
		log.Printf("cannot check %s", a.Name)
		return
	}
	log.Printf("quering %s", a.Name)
	//--- For stats: count queries issued during this round.
	atomic.AddInt64(&chCount, 1)
	q, err := a.Build()
	if err != nil {
		log.Println(err)
		return
	}

	rows, err := q.RunWith(m.ch).Query()

	if err != nil {
		log.Println(err)
		return
	}
	defer rows.Close()

	// Each row yields (value, valid); a notification is queued only when
	// the row is flagged valid AND the value is non-NULL.
	for rows.Next() {
		var (
			value sql.NullFloat64
			valid bool
		)
		if err := rows.Scan(&value, &valid); err != nil {
			log.Println(err)
			continue
		}
		if valid && value.Valid {
			log.Printf("%s: valid", a.Name)
			m.notificationsMutex.Lock()
			// Keyed by AlertID: a later row for the same alert overwrites
			// the earlier notification within this round.
			m.notifications[a.AlertID] = &postgres.TenantNotification{
				TenantId:    a.TenantId,
				Title:       a.Name,
				Description: fmt.Sprintf("has been triggered, %s = %.0f (%s %.0f).", a.Query.Left, value.Float64, a.Query.Operator, a.Query.Right),
				ButtonText:  "Check metrics for more details",
				ButtonUrl:   fmt.Sprintf("/%d/metrics", a.ProjectID),
				ImageUrl:    nil,
				Options:     map[string]interface{}{"source": "ALERT", "sourceId": a.AlertID, "sourceMeta": a.DetectionMethod, "message": a.Options.Message, "projectId": a.ProjectID},
			}
			m.notificationsMutex.Unlock()
		}
	}

}
|
||||
func (m *manager) RequestAll() {
|
||||
now := time.Now().Unix()
|
||||
m.cacheMutex.Lock()
|
||||
for _, a := range m.alertsCache {
|
||||
m.chParallel <- true
|
||||
m.notificationsGo.Add(1)
|
||||
go m.processAlert(a)
|
||||
//m.processAlert(a)
|
||||
}
|
||||
//log.Println("releasing cache")
|
||||
m.cacheMutex.Unlock()
|
||||
//log.Println("waiting for all alerts to finish")
|
||||
m.notificationsGo.Wait()
|
||||
log.Printf("done %d CH queries in: %ds", chCount, time.Now().Unix()-now)
|
||||
chCount = 0
|
||||
//log.Printf("Processing %d Notifications", len(m.notifications))
|
||||
m.notificationsMutex.Lock()
|
||||
go m.ProcessNotifications(m.notifications)
|
||||
m.notificationsMutex.Unlock()
|
||||
m.notifications = make(map[uint32]*postgres.TenantNotification)
|
||||
//log.Printf("Notifications purged: %d", len(m.notifications))
|
||||
}
|
||||
|
||||
func (m *manager) ProcessNotifications(allNotifications map[uint32]*postgres.TenantNotification) {
|
||||
//return
|
||||
if len(allNotifications) == 0 {
|
||||
log.Println("No notifications to process")
|
||||
return
|
||||
}
|
||||
log.Printf("sending %d notifications", len(allNotifications))
|
||||
allIds := make([]uint32, 0, len(allNotifications))
|
||||
toSend := postgres.Notifications{
|
||||
Notifications: []*postgres.TenantNotification{},
|
||||
}
|
||||
for k, n := range allNotifications {
|
||||
//log.Printf("notification for %d", k)
|
||||
allIds = append(allIds, k)
|
||||
toSend.Notifications = append(toSend.Notifications, n)
|
||||
}
|
||||
toSend.Send(m.notificationsUrl)
|
||||
if err := m.pg.SaveLastNotification(allIds); err != nil {
|
||||
log.Printf("Error saving LastNotification time: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
145
backend/services/assets/cacher/cacher.go
Normal file
145
backend/services/assets/cacher/cacher.go
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
package cacher
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"net/http"
|
||||
"crypto/tls"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"openreplay/backend/pkg/storage"
|
||||
)
|
||||
|
||||
// BODY_LIMIT caps the size of a downloaded asset body.
const BODY_LIMIT = 6 * (1 << 20) // 6 Mb
// MAX_CACHE_DEPTH bounds recursive caching of assets referenced from CSS.
const MAX_CACHE_DEPTH = 5

// cacher downloads remote assets and uploads them to S3, deduplicating
// recently-seen URLs through timeoutMap. Failures are reported on Errors.
type cacher struct {
	timeoutMap *timeoutMap // Concurrency implemented
	s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently."
	httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
	rewriter *assets.Rewriter // Read only
	Errors chan error // unbuffered: a consumer must drain it or cacheURL goroutines block
}
|
||||
|
||||
func NewCacher(region string, bucket string, origin string) *cacher {
|
||||
rewriter := assets.NewRewriter(origin)
|
||||
return &cacher{
|
||||
timeoutMap: newTimeoutMap(),
|
||||
s3: storage.NewS3(region, bucket),
|
||||
httpClient: &http.Client{
|
||||
Timeout: time.Duration(6) * time.Second,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
},
|
||||
},
|
||||
rewriter: rewriter,
|
||||
Errors: make(chan error),
|
||||
}
|
||||
}
|
||||
|
||||
func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, context string, isJS bool) {
|
||||
if c.timeoutMap.contains(requestURL) {
|
||||
return
|
||||
}
|
||||
c.timeoutMap.add(requestURL)
|
||||
var cachePath string
|
||||
if (isJS) {
|
||||
cachePath = assets.GetCachePathForJS(requestURL)
|
||||
} else {
|
||||
cachePath = assets.GetCachePathForAssets(sessionID, requestURL)
|
||||
}
|
||||
if c.s3.Exists(cachePath) {
|
||||
return
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest("GET", requestURL, nil)
|
||||
req.Header.Set("Cookie", "ABv=3;") // Hack for rueducommerce
|
||||
res, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, context)
|
||||
return
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
// TODO: retry
|
||||
c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), context)
|
||||
return
|
||||
}
|
||||
data, err := ioutil.ReadAll(io.LimitReader(res.Body, BODY_LIMIT+1))
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, context)
|
||||
return
|
||||
}
|
||||
if len(data) > BODY_LIMIT {
|
||||
c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), context)
|
||||
return
|
||||
}
|
||||
|
||||
contentType := res.Header.Get("Content-Type")
|
||||
if contentType == "" {
|
||||
contentType = mime.TypeByExtension(filepath.Ext(res.Request.URL.Path))
|
||||
}
|
||||
isCSS := strings.HasPrefix(contentType, "text/css")
|
||||
|
||||
strData := string(data)
|
||||
if isCSS {
|
||||
strData = c.rewriter.RewriteCSS(sessionID, requestURL, strData) // TODO: one method for reqrite and return list
|
||||
}
|
||||
|
||||
// TODO: implement in streams
|
||||
err = c.s3.Upload(strings.NewReader(strData), cachePath, contentType, false)
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, context)
|
||||
return
|
||||
}
|
||||
c.timeoutMap.add(requestURL)
|
||||
|
||||
if isCSS {
|
||||
if depth > 0 {
|
||||
for _, extractedURL := range assets.ExtractURLsFromCSS(string(data)) {
|
||||
if fullURL, cachable := assets.GetFullCachableURL(requestURL, extractedURL); cachable {
|
||||
go c.cacheURL(fullURL, sessionID, depth-1, context + "\n -> " + fullURL, false)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, context)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
c.Errors <- errors.Wrap(errors.New("Maximum recursion cache depth exceeded"), context)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CacheJSFile asynchronously caches a single JS source file. It uses
// session ID 0 and depth 0: JS files are stored session-independently and
// are not scanned for further references.
func (c *cacher) CacheJSFile(sourceURL string) {
	go c.cacheURL(sourceURL, 0, 0, sourceURL, true)
}

// CacheURL asynchronously caches fullURL for the given session, following
// assets referenced from CSS up to MAX_CACHE_DEPTH levels deep.
func (c *cacher) CacheURL(sessionID uint64, fullURL string) {
	go c.cacheURL(fullURL, sessionID, MAX_CACHE_DEPTH, fullURL, false)
}

// func (c *cacher) CacheURL(sessionID uint64, baseURL string, relativeURL string) {
// 	if fullURL, cachable := assets.GetFullCachableURL(baseURL, relativeURL); cachable {
// 		c.CacheURL(sessionID, fullURL)
// 	}
// }

// func (c *cacher) CacheCSSLinks(baseURL string, css string, sessionID uint64) {
// 	for _, extractedURL := range assets.ExtractURLsFromCSS(css) {
// 		c.CacheURL(sessionID, baseURL, extractedURL)
// 	}
// }

// UpdateTimeouts evicts entries older than MAX_STORAGE_TIME from the
// recently-cached URL map; called periodically by the service loop.
func (c *cacher) UpdateTimeouts() {
	c.timeoutMap.deleteOutdated()
}
|
||||
45
backend/services/assets/cacher/timeoutMap.go
Normal file
45
backend/services/assets/cacher/timeoutMap.go
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
package cacher
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MAX_STORAGE_TIME is how long an entry survives before deleteOutdated
// evicts it.
const MAX_STORAGE_TIME = 18 * time.Hour

// If problem with cache contention (>=4 core) look at sync.Map

// timeoutMap is a concurrency-safe set of string keys, each stamped with
// its insertion (or last refresh) time.
type timeoutMap struct {
	mx sync.RWMutex
	m  map[string]time.Time
}

// newTimeoutMap returns an empty, ready-to-use timeoutMap.
func newTimeoutMap() *timeoutMap {
	return &timeoutMap{m: make(map[string]time.Time)}
}

// add records key with the current time, refreshing it if already present.
func (tm *timeoutMap) add(key string) {
	tm.mx.Lock()
	tm.m[key] = time.Now()
	tm.mx.Unlock()
}

// contains reports whether key is currently stored.
func (tm *timeoutMap) contains(key string) bool {
	tm.mx.RLock()
	defer tm.mx.RUnlock()
	_, found := tm.m[key]
	return found
}

// deleteOutdated removes every entry stamped more than MAX_STORAGE_TIME ago.
func (tm *timeoutMap) deleteOutdated() {
	cutoff := time.Now().Add(-MAX_STORAGE_TIME)
	tm.mx.Lock()
	defer tm.mx.Unlock()
	for key, stampedAt := range tm.m {
		if stampedAt.Before(cutoff) {
			delete(tm.m, key)
		}
	}
}
|
||||
32
backend/services/assets/jsexception.go
Normal file
32
backend/services/assets/jsexception.go
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
|
||||
|
||||
// frame is the subset of a JS stack frame we need from the payload.
type frame struct {
	FileName string `json:"fileName"`
}

// extractJSExceptionSources parses a JSON-encoded JS exception payload
// (a list of stack frames) and returns the http(s) source-file URLs in
// first-seen order, deduplicated by their query-less form.
func extractJSExceptionSources(payload *string) ([]string, error) {
	var frames []frame
	if err := json.Unmarshal([]byte(*payload), &frames); err != nil {
		return nil, err
	}

	seen := make(map[string]bool)
	var sources []string
	for _, fr := range frames {
		base := strings.Split(fr.FileName, "?")[0]
		if strings.HasPrefix(base, "http") && !seen[base] {
			// NOTE(review): the full FileName (query included) is returned
			// while dedup uses the query-less form — presumably intentional;
			// confirm against the cacher's expectations.
			sources = append(sources, fr.FileName)
			seen[base] = true
		}
	}
	return sources, nil
}
|
||||
74
backend/services/assets/main.go
Normal file
74
backend/services/assets/main.go
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/services/assets/cacher"
|
||||
)
|
||||
|
||||
|
||||
// main runs the assets service: it consumes asset-cache triggers and JS
// exception events from the trigger topic and caches the referenced
// resources into S3.
func main() {
	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

	GROUP_CACHE := env.String("GROUP_CACHE")
	TOPIC_TRIGGER := env.String("TOPIC_TRIGGER")

	cacher := cacher.NewCacher(
		env.String("AWS_REGION"),
		env.String("S3_BUCKET_ASSETS"),
		env.String("ASSETS_ORIGIN"),
	)

	consumer := queue.NewMessageConsumer(
		GROUP_CACHE,
		[]string{ TOPIC_TRIGGER },
		func(sessionID uint64, message messages.Message, e *types.Meta) {
			switch msg := message.(type) {
			case *messages.AssetCache:
				cacher.CacheURL(sessionID, msg.URL)
			case *messages.ErrorEvent:
				// Only JS exceptions carry source files worth caching.
				if msg.Source != "js_exception" {
					return
				}
				sourceList, err := extractJSExceptionSources(&msg.Payload)
				if err != nil {
					log.Printf("Error on source extraction: %v", err)
					return
				}
				for _, source := range sourceList {
					cacher.CacheJSFile(source)
				}
			}
		},
	)

	// Periodically evict stale entries from the cacher's dedup map.
	tick := time.Tick(20 * time.Minute)

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	for {
		select {
		case sig := <-sigchan:
			log.Printf("Caught signal %v: terminating\n", sig)
			consumer.Close()
			os.Exit(0)
		case <-tick:
			cacher.UpdateTimeouts()
		default:
			if err := consumer.ConsumeNext(); err != nil {
				log.Fatalf("Error on consumption: %v", err)
			}
		}
	}
}
|
||||
52
backend/services/db/heuristics/anr.go
Normal file
52
backend/services/db/heuristics/anr.go
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
package heuristics
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
const MIN_TIME_AFTER_LAST_HEARTBEAT = 60 * 1000
|
||||
|
||||
type anr struct {
|
||||
readyMessageStore
|
||||
lastLabel string
|
||||
lastHeartbeatTimestamp uint64
|
||||
lastHeartbeatIndex uint64
|
||||
}
|
||||
|
||||
func (h *anr) buildIf(timestamp uint64) {
|
||||
if h.lastHeartbeatTimestamp != 0 && h.lastHeartbeatTimestamp + MIN_TIME_AFTER_LAST_HEARTBEAT <= timestamp {
|
||||
m := &IOSIssueEvent{
|
||||
Type: "anr",
|
||||
ContextString: h.lastLabel,
|
||||
//Context: "{}",
|
||||
//Payload: fmt.SPrint
|
||||
}
|
||||
m.Timestamp = h.lastHeartbeatTimestamp
|
||||
m.Index = h.lastHeartbeatIndex // Associated Index/ MessageID ?
|
||||
h.append(m)
|
||||
h.lastHeartbeatTimestamp = 0
|
||||
h.lastHeartbeatIndex = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (h *anr) HandleMessage(msg Message) {
|
||||
switch m := msg.(type) {
|
||||
case *IOSClickEvent:
|
||||
h.buildIf(m.Timestamp)
|
||||
h.lastLabel = m.Label
|
||||
h.lastHeartbeatTimestamp = m.Timestamp
|
||||
h.lastHeartbeatIndex = m.Index
|
||||
case *IOSInputEvent:
|
||||
h.buildIf(m.Timestamp)
|
||||
h.lastLabel = m.Label
|
||||
h.lastHeartbeatTimestamp = m.Timestamp
|
||||
h.lastHeartbeatIndex = m.Index
|
||||
case *IOSPerformanceEvent:
|
||||
h.buildIf(m.Timestamp)
|
||||
h.lastHeartbeatTimestamp = m.Timestamp
|
||||
h.lastHeartbeatIndex = m.Index
|
||||
case *IOSSessionEnd:
|
||||
h.buildIf(m.Timestamp)
|
||||
}
|
||||
}
|
||||
58
backend/services/db/heuristics/clickrage.go
Normal file
58
backend/services/db/heuristics/clickrage.go
Normal file
|
|
@ -0,0 +1,58 @@
|
|||
package heuristics
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
const CLICK_TIME_DIFF = 200
|
||||
const MIN_CLICKS_IN_A_ROW = 3
|
||||
|
||||
type clickrage struct {
|
||||
readyMessageStore
|
||||
lastTimestamp uint64
|
||||
lastLabel string
|
||||
firstInARawTimestamp uint64
|
||||
firstInARawSeqIndex uint64
|
||||
countsInARow int
|
||||
}
|
||||
|
||||
func (h *clickrage) build() {
|
||||
if h.countsInARow >= MIN_CLICKS_IN_A_ROW {
|
||||
m := &IOSIssueEvent{
|
||||
Type: "click_rage",
|
||||
ContextString: h.lastLabel,
|
||||
//Context: "{}",
|
||||
//Payload: fmt.SPrint
|
||||
}
|
||||
m.Timestamp = h.firstInARawTimestamp
|
||||
m.Index = h.firstInARawSeqIndex // Associated Index/ MessageID ?
|
||||
h.append(m)
|
||||
}
|
||||
h.lastTimestamp = 0
|
||||
h.lastLabel = ""
|
||||
h.firstInARawTimestamp = 0
|
||||
h.firstInARawSeqIndex = 0
|
||||
h.countsInARow = 0
|
||||
}
|
||||
|
||||
func (h *clickrage) HandleMessage(msg Message) {
|
||||
switch m := msg.(type) {
|
||||
case *IOSClickEvent:
|
||||
if h.lastTimestamp + CLICK_TIME_DIFF < m.Timestamp && h.lastLabel == m.Label {
|
||||
h.lastTimestamp = m.Timestamp
|
||||
h.countsInARow += 1
|
||||
return
|
||||
}
|
||||
h.build()
|
||||
if m.Label != "" {
|
||||
h.lastTimestamp = m.Timestamp
|
||||
h.lastLabel = m.Label
|
||||
h.firstInARawTimestamp = m.Timestamp
|
||||
h.firstInARawSeqIndex = m.Index
|
||||
h.countsInARow = 1
|
||||
}
|
||||
case *IOSSessionEnd:
|
||||
h.build()
|
||||
}
|
||||
}
|
||||
65
backend/services/db/heuristics/heuristics.go
Normal file
65
backend/services/db/heuristics/heuristics.go
Normal file
|
|
@ -0,0 +1,65 @@
|
|||
package heuristics
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
// MessageHandler consumes raw session messages.
type MessageHandler interface {
	HandleMessage(Message)
}
// ReadyMessagesIterator drains the messages a heuristic has produced.
type ReadyMessagesIterator interface {
	IterateReadyMessages(func(Message))
}

// Handler is a heuristic: it consumes session messages and produces
// derived ones.
type Handler interface {
	MessageHandler
	ReadyMessagesIterator
}

// mainHandler routes messages to per-session handlers, keyed by session
// ID. Plain map: not safe for concurrent use.
type mainHandler map[uint64]*sessHandler

// NewHandler returns an empty mainHandler.
func NewHandler() mainHandler {
	return make(mainHandler)
}
|
||||
|
||||
// getSessHandler returns the handler for session, creating it on first
// use. It returns nil when session is nil — callers MUST check the result
// before dereferencing it.
func (m mainHandler) getSessHandler(session *Session) *sessHandler {
	if session == nil {
		// No session context: nothing to attach a handler to.
		return nil
	}
	s := m[session.SessionID]
	if s == nil {
		s = newSessHandler(session)
		m[session.SessionID] = s
	}
	return s
}
|
||||
|
||||
func (m mainHandler) HandleMessage(session *Session, msg Message) {
|
||||
s := m.getSessHandler(session)
|
||||
s.HandleMessage(msg)
|
||||
}
|
||||
|
||||
func (m mainHandler) IterateSessionReadyMessages(sessionID uint64, iter func(msg Message)) {
|
||||
s, ok := m[ sessionID ]
|
||||
if !ok { return }
|
||||
s.IterateReadyMessages(iter)
|
||||
if s.IsEnded() {
|
||||
delete(m, sessionID)
|
||||
}
|
||||
}
|
||||
|
||||
func (m mainHandler) IterateReadyMessages(iter func(sessionID uint64, msg Message)) {
|
||||
for sessionID, s := range m {
|
||||
s.IterateReadyMessages(func(msg Message) {
|
||||
iter(sessionID, msg)
|
||||
})
|
||||
if s.IsEnded() {
|
||||
delete(m, sessionID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
103
backend/services/db/heuristics/performance.go
Normal file
103
backend/services/db/heuristics/performance.go
Normal file
|
|
@ -0,0 +1,103 @@
|
|||
package heuristics
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
// AGGR_TIME is the performance aggregation window width in milliseconds
// (15 minutes).
const AGGR_TIME = 15 * 60 * 1000

// valueAggregator accumulates a running sum and count for averaging.
type valueAggregator struct {
	sum   float64
	count float64
}

// aggregate returns the truncated mean of the accumulated values, or 0
// when nothing was accumulated.
func (va *valueAggregator) aggregate() uint64 {
	if va.count == 0 {
		return 0
	}
	return uint64(va.sum / va.count)
}
|
||||
|
||||
// performanceAggregator folds IOSPerformanceEvent samples into
// AGGR_TIME-wide windows, tracking min/max/avg per metric and emitting an
// IOSPerformanceAggregated message per finished window.
type performanceAggregator struct {
	readyMessageStore
	pa *IOSPerformanceAggregated // current window; nil until the first message
	fps valueAggregator
	cpu valueAggregator
	memory valueAggregator
	battery valueAggregator
}
|
||||
|
||||
func (h *performanceAggregator) build(timestamp uint64) {
|
||||
if h.pa == nil {
|
||||
return
|
||||
}
|
||||
h.pa.TimestampEnd = timestamp
|
||||
h.pa.AvgFPS = h.fps.aggregate()
|
||||
h.pa.AvgCPU = h.cpu.aggregate()
|
||||
h.pa.AvgMemory = h.memory.aggregate()
|
||||
h.pa.AvgBattery = h.battery.aggregate()
|
||||
|
||||
h.append(h.pa)
|
||||
|
||||
h.pa = &IOSPerformanceAggregated{}
|
||||
for _, agg := range []valueAggregator{h.fps, h.cpu, h.memory, h.battery} {
|
||||
agg.sum = 0
|
||||
agg.count = 0
|
||||
}
|
||||
}
|
||||
|
||||
// HandleMessage folds IOS performance samples into the current window and
// flushes the window when it exceeds AGGR_TIME or the session ends.
// For each metric the pattern is: accumulate for the average, then track
// min/max (a zero min is treated as "unset").
func (h *performanceAggregator) HandleMessage(msg Message) {
	if h.pa == nil {
		h.pa = &IOSPerformanceAggregated{} // TODO: struct type in messages
	}
	switch m := msg.(type) { // TODO: All Timestampe messages
	case *IOSPerformanceEvent:
		if h.pa.TimestampStart == 0 {
			h.pa.TimestampStart = m.Timestamp
		}
		// Window overflow: emit the finished window first; the current
		// sample then lands in the fresh window below.
		if h.pa.TimestampStart + AGGR_TIME <= m.Timestamp {
			h.build(m.Timestamp)
		}
		switch m.Name {
		case "fps":
			h.fps.count += 1
			h.fps.sum += float64(m.Value)
			if m.Value < h.pa.MinFPS || h.pa.MinFPS == 0 {
				h.pa.MinFPS = m.Value
			}
			if m.Value > h.pa.MaxFPS {
				h.pa.MaxFPS = m.Value
			}
		case "mainThreadCPU":
			h.cpu.count += 1
			h.cpu.sum += float64(m.Value)
			if m.Value < h.pa.MinCPU || h.pa.MinCPU == 0 {
				h.pa.MinCPU = m.Value
			}
			if m.Value > h.pa.MaxCPU {
				h.pa.MaxCPU = m.Value
			}
		case "memoryUsage":
			h.memory.count += 1
			h.memory.sum += float64(m.Value)
			if m.Value < h.pa.MinMemory || h.pa.MinMemory == 0 {
				h.pa.MinMemory = m.Value
			}
			if m.Value > h.pa.MaxMemory {
				h.pa.MaxMemory = m.Value
			}
		case "batteryLevel":
			h.battery.count += 1
			h.battery.sum += float64(m.Value)
			if m.Value < h.pa.MinBattery || h.pa.MinBattery == 0 {
				h.pa.MinBattery = m.Value
			}
			if m.Value > h.pa.MaxBattery {
				h.pa.MaxBattery = m.Value
			}
		}
	case *IOSSessionEnd:
		h.build(m.Timestamp)
	}
}
|
||||
21
backend/services/db/heuristics/readyMessageStore.go
Normal file
21
backend/services/db/heuristics/readyMessageStore.go
Normal file
|
|
@ -0,0 +1,21 @@
|
|||
package heuristics
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
type readyMessageStore struct {
|
||||
store []Message
|
||||
}
|
||||
|
||||
func (s *readyMessageStore) append(msg Message) {
|
||||
s.store = append(s.store, msg)
|
||||
}
|
||||
|
||||
func (s *readyMessageStore) IterateReadyMessages(cb func(msg Message)) {
|
||||
for _, msg := range s.store {
|
||||
cb(msg)
|
||||
}
|
||||
s.store = nil
|
||||
}
|
||||
47
backend/services/db/heuristics/session.go
Normal file
47
backend/services/db/heuristics/session.go
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package heuristics
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
|
||||
type sessHandler struct {
|
||||
session *Session
|
||||
handlers []Handler
|
||||
ended bool
|
||||
}
|
||||
|
||||
|
||||
func newSessHandler(session *Session) *sessHandler {
|
||||
return &sessHandler{
|
||||
session: session,
|
||||
handlers: []Handler{
|
||||
new(clickrage),
|
||||
new(performanceAggregator),
|
||||
new(anr),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sessHandler) HandleMessage(msg Message) {
|
||||
for _, h := range s.handlers {
|
||||
h.HandleMessage(msg)
|
||||
}
|
||||
if _, isEnd := msg.(*IOSSessionEnd); isEnd {
|
||||
s.ended = true
|
||||
}
|
||||
if _, isEnd := msg.(*SessionEnd); isEnd {
|
||||
s.ended = true
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sessHandler) IterateReadyMessages(cb func(msg Message)) {
|
||||
for _, h := range s.handlers {
|
||||
h.IterateReadyMessages(cb)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *sessHandler) IsEnded() bool {
|
||||
return s.ended
|
||||
}
|
||||
97
backend/services/db/main.go
Normal file
97
backend/services/db/main.go
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/services/db/heuristics"
|
||||
)
|
||||
|
||||
|
||||
var pg *cache.PGCache
|
||||
|
||||
// main runs the db service: it consumes trigger-topic messages, persists
// them to Postgres, feeds per-session heuristics, and stores derived
// stats for both the raw and the heuristic-produced messages.
func main() {
	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

	initStats()
	// Session cache TTL: 20 minutes, expressed in milliseconds.
	pg = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000 * 60 * 20)
	defer pg.Close()

	heurFinder := heuristics.NewHandler()

	consumer := queue.NewMessageConsumer(
		env.String("GROUP_DB"),
		[]string{
			//env.String("TOPIC_RAW"),
			env.String("TOPIC_TRIGGER"),
		},
		func(sessionID uint64, msg messages.Message, _ *types.Meta) {
			if err := insertMessage(sessionID, msg); err != nil {
				// Duplicate-key violations are expected on redelivery and
				// are silently skipped.
				if !postgres.IsPkeyViolation(err) {
					log.Printf("Message Insertion Error %v, Message %v", err, msg)
				}
				return
			}

			session, err := pg.GetSession(sessionID)
			if err != nil {
				// Might happen due to the assets-related message TODO: log only if session is necessary for this kind of message
				log.Printf("Error on session retrieving from cache: %v, Message %v, sessionID %v", err, msg, sessionID)
				return;
			}

			err = insertStats(session, msg)
			if err != nil {
				log.Printf("Stats Insertion Error %v; Session:%v, Message: %v", err, session, msg)
			}

			// Feed the heuristics, then persist whatever derived messages
			// became ready for this session.
			heurFinder.HandleMessage(session, msg)
			heurFinder.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
				// TODO: DRY code (carefully with the return statement logic)
				if err := insertMessage(sessionID, msg); err != nil {
					if !postgres.IsPkeyViolation(err) {
						log.Printf("Message Insertion Error %v, Message %v", err, msg)
					}
					return
				}

				err = insertStats(session, msg)
				if err != nil {
					log.Printf("Stats Insertion Error %v", err)
				}
			})
		},
	)

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	tick := time.Tick(15 * time.Second)

	for {
		select {
		case sig := <-sigchan:
			log.Printf("Caught signal %v: terminating\n", sig)
			consumer.Close()
			os.Exit(0)
		case <-tick:
			commitStats() // TODO: sync with wueue commit
		default:
			err := consumer.ConsumeNext()
			if err != nil {
				log.Fatalf("Error on consumption: %v", err) // TODO: is always fatal?
			}
		}
	}

}
|
||||
64
backend/services/db/messages.go
Normal file
64
backend/services/db/messages.go
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
// insertMessage persists a single session message to Postgres, dispatching
// on the concrete message type. Message types with no mapping are accepted
// silently (nil return).
func insertMessage(sessionID uint64, msg Message) error {
	switch m := msg.(type) {
	// Common
	case *Metadata:
		return pg.InsertMetadata(sessionID, m)
	case *IssueEvent:
		return pg.InsertIssueEvent(sessionID, m)
	//TODO: message adapter (transformer) (at the level of pkg/message) for types:
	// case *IOSMetadata, *IOSIssueEvent and others

	// Web
	case *SessionStart:
		return pg.InsertWebSessionStart(sessionID, m)
	case *SessionEnd:
		return pg.InsertWebSessionEnd(sessionID, m)
	case *UserID:
		return pg.InsertWebUserID(sessionID, m)
	case *UserAnonymousID:
		return pg.InsertWebUserAnonymousID(sessionID, m)
	case *CustomEvent:
		return pg.InsertWebCustomEvent(sessionID, m)
	case *ClickEvent:
		return pg.InsertWebClickEvent(sessionID, m)
	case *InputEvent:
		return pg.InsertWebInputEvent(sessionID, m)
	// Unique Web messages
	case *ResourceEvent:
		return pg.InsertWebResourceEvent(sessionID, m)
	case *PageEvent:
		return pg.InsertWebPageEvent(sessionID, m)
	case *ErrorEvent:
		return pg.InsertWebErrorEvent(sessionID, m)

	// IOS
	case *IOSSessionStart:
		return pg.InsertIOSSessionStart(sessionID, m)
	case *IOSSessionEnd:
		return pg.InsertIOSSessionEnd(sessionID, m)
	case *IOSUserID:
		return pg.InsertIOSUserID(sessionID, m)
	case *IOSUserAnonymousID:
		return pg.InsertIOSUserAnonymousID(sessionID, m)
	case *IOSCustomEvent:
		return pg.InsertIOSCustomEvent(sessionID, m)
	case *IOSClickEvent:
		return pg.InsertIOSClickEvent(sessionID, m)
	case *IOSInputEvent:
		return pg.InsertIOSInputEvent(sessionID, m)
	// Unique IOS messages
	case *IOSNetworkCall:
		return pg.InsertIOSNetworkCall(sessionID, m)
	case *IOSScreenEnter:
		return pg.InsertIOSScreenEnter(sessionID, m)
	case *IOSCrash:
		return pg.InsertIOSCrash(sessionID, m)
	}
	return nil // "Not implemented"
}
|
||||
35
backend/services/db/stats.go
Normal file
35
backend/services/db/stats.go
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
// initStats prepares in-memory stats state. Currently a no-op: stats rows
// are written straight through to Postgres on every message.
func initStats() {
	// noop
}
|
||||
|
||||
|
||||
func insertStats(session *Session, msg Message) error {
|
||||
switch m := msg.(type) {
|
||||
// Web
|
||||
case *PerformanceTrackAggr:
|
||||
return pg.InsertWebStatsPerformance(session.SessionID, m)
|
||||
case *ResourceEvent:
|
||||
return pg.InsertWebStatsResourceEvent(session.SessionID, m)
|
||||
case *LongTask:
|
||||
return pg.InsertWebStatsLongtask(session.SessionID, m)
|
||||
|
||||
// IOS
|
||||
// case *IOSPerformanceAggregated:
|
||||
// return pg.InsertIOSPerformanceAggregated(session, m)
|
||||
// case *IOSNetworkCall:
|
||||
// return pg.InsertIOSNetworkCall(session, m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// commitStats flushes any buffered stats to the database. Currently a
// no-op (inserts are unbuffered); always returns nil.
func commitStats() error {
	return nil
}
|
||||
316
backend/services/ender/builder/builder.go
Normal file
316
backend/services/ender/builder/builder.go
Normal file
|
|
@ -0,0 +1,316 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"log"
|
||||
|
||||
"openreplay/backend/pkg/intervals"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
// getURLExtention returns the file extension of the URL's path component
// (e.g. "js" for "https://x.com/a/main.js?v=1"). It returns "" when the
// URL cannot be parsed or when the path contains no ".".
func getURLExtention(URL string) string {
	u, err := url.Parse(URL)
	if err != nil {
		return ""
	}
	i := strings.LastIndex(u.Path, ".")
	if i == -1 {
		// Fix: without this guard, u.Path[i+1:] with i == -1 returned the
		// whole path, so an extension-less URL such as "/css" was reported
		// as having extension "css" and got misclassified upstream.
		return ""
	}
	return u.Path[i+1:]
}
|
||||
|
||||
func getResourceType(initiator string, URL string) string {
|
||||
switch initiator {
|
||||
case "xmlhttprequest", "fetch":
|
||||
return "fetch"
|
||||
case "img":
|
||||
return "img"
|
||||
default:
|
||||
switch getURLExtention(URL) {
|
||||
case "css":
|
||||
return "stylesheet"
|
||||
case "js":
|
||||
return "script"
|
||||
case "png", "gif", "jpg", "jpeg", "svg":
|
||||
return "img"
|
||||
case "mp4", "mkv", "ogg", "webm", "avi", "mp3":
|
||||
return "media"
|
||||
default:
|
||||
return "other"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// builder accumulates raw messages of one session and derives higher-level
// events (page, input, performance aggregates) and issues (cpu, memory,
// dom drop, click rage, dead click) from them.
type builder struct {
	readyMsgs  []Message // Derived messages waiting to be flushed downstream.
	timestamp  uint64    // Highest message timestamp seen so far for this session.
	peBuilder  *pageEventBuilder
	ptaBuilder *performanceTrackAggrBuilder
	ieBuilder  *inputEventBuilder
	ciFinder   *cpuIssueFinder
	miFinder   *memoryIssueFinder
	ddDetector *domDropDetector
	crDetector *clickRageDetector
	dcDetector *deadClickDetector
	integrationsWaiting bool // True until integration events are received — TODO confirm usage (not read in this file).


	sid uint64 // Session ID, for debugging/logging.
}
|
||||
|
||||
func NewBuilder() *builder {
|
||||
return &builder{
|
||||
peBuilder: &pageEventBuilder{},
|
||||
ptaBuilder: &performanceTrackAggrBuilder{},
|
||||
ieBuilder: NewInputEventBuilder(),
|
||||
ciFinder: &cpuIssueFinder{},
|
||||
miFinder: &memoryIssueFinder{},
|
||||
ddDetector: &domDropDetector{},
|
||||
crDetector: &clickRageDetector{},
|
||||
dcDetector: &deadClickDetector{},
|
||||
integrationsWaiting: true,
|
||||
}
|
||||
}
|
||||
|
||||
// appendReadyMessage queues a derived message for the next flush.
// Callers must not pass a nil value: a typed nil stored in the Message
// interface would still compare non-nil downstream.
func (b *builder) appendReadyMessage(msg Message) { // interface is never nil even if it holds nil value
	b.readyMsgs = append(b.readyMsgs, msg)
}
|
||||
|
||||
func (b *builder) iterateReadyMessage(iter func(msg Message)) {
|
||||
for _, readyMsg := range b.readyMsgs {
|
||||
iter(readyMsg)
|
||||
}
|
||||
b.readyMsgs = nil
|
||||
}
|
||||
|
||||
func (b *builder) buildSessionEnd() {
|
||||
sessionEnd := &SessionEnd{
|
||||
Timestamp: b.timestamp, // + delay?
|
||||
}
|
||||
b.appendReadyMessage(sessionEnd)
|
||||
}
|
||||
|
||||
func (b *builder) buildPageEvent() {
|
||||
if msg := b.peBuilder.Build(); msg != nil {
|
||||
b.appendReadyMessage(msg)
|
||||
}
|
||||
}
|
||||
func (b *builder) buildPerformanceTrackAggr() {
|
||||
if msg := b.ptaBuilder.Build(); msg != nil {
|
||||
b.appendReadyMessage(msg)
|
||||
}
|
||||
}
|
||||
func (b *builder) buildInputEvent() {
|
||||
if msg := b.ieBuilder.Build(); msg != nil {
|
||||
b.appendReadyMessage(msg)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMessage feeds one raw session message into the builder. It keeps
// b.timestamp at the maximum message timestamp seen so far, forwards
// pass-through messages as-is, routes the rest to the specialized
// sub-builders/finders/detectors, and queues whatever derived messages
// they emit. The dead-click detector sees every message last, since any
// DOM mutation counts as a reaction to the previous click.
func (b *builder) handleMessage(message Message, messageID uint64) {
	timestamp := uint64(message.Meta().Timestamp)
	if b.timestamp <= timestamp {
		b.timestamp = timestamp
	}
	// Pass-through messages are forwarded even before the first timestamp.
	switch msg := message.(type) {
	case *SessionStart,
		*Metadata,
		*UserID,
		*UserAnonymousID:
		b.appendReadyMessage(msg)
	}
	// Start from the first timestamp.
	if b.timestamp == 0 {
		return
	}
	switch msg := message.(type) {
	case *SetPageLocation:
		if msg.NavigationStart == 0 {
			// SPA-style location change: no page load happened, emit the
			// PageEvent immediately with Loaded: false.
			b.appendReadyMessage(&PageEvent{
				URL: msg.URL,
				Referrer: msg.Referrer,
				Loaded: false,
				MessageID: messageID,
				Timestamp: b.timestamp,
			})
		} else {
			// Real navigation: flush pending page/input state, then start a
			// new PageEvent that waits for the timing messages. Input labels
			// are reset because node IDs restart with the new DOM.
			b.buildPageEvent()
			b.buildInputEvent()
			b.ieBuilder.ClearLabels()
			b.peBuilder.HandleSetPageLocation(msg, messageID, b.timestamp)
			b.miFinder.HandleSetPageLocation(msg)
			b.ciFinder.HandleSetPageLocation(msg)
		}
	case *PageLoadTiming:
		if rm := b.peBuilder.HandlePageLoadTiming(msg); rm != nil {
			b.appendReadyMessage(rm)
		}
	case *PageRenderTiming:
		if rm := b.peBuilder.HandlePageRenderTiming(msg); rm != nil {
			b.appendReadyMessage(rm)
		}
	case *PerformanceTrack:
		// One sample drives the aggregate builder and both issue finders.
		if rm := b.ptaBuilder.HandlePerformanceTrack(msg, b.timestamp); rm != nil {
			b.appendReadyMessage(rm)
		}
		if rm := b.ciFinder.HandlePerformanceTrack(msg, messageID, b.timestamp); rm != nil {
			b.appendReadyMessage(rm)
		}
		if rm := b.miFinder.HandlePerformanceTrack(msg, messageID, b.timestamp); rm != nil {
			b.appendReadyMessage(rm)
		}
	case *SetInputTarget:
		if rm := b.ieBuilder.HandleSetInputTarget(msg); rm != nil {
			b.appendReadyMessage(rm)
		}
	case *SetInputValue:
		if rm := b.ieBuilder.HandleSetInputValue(msg, messageID, b.timestamp); rm != nil {
			b.appendReadyMessage(rm)
		}
	case *MouseClick:
		// A click finishes any in-progress input event.
		b.buildInputEvent()
		if rm := b.crDetector.HandleMouseClick(msg, messageID, b.timestamp); rm != nil {
			b.appendReadyMessage(rm)
		}
		// Only labeled clicks become ClickEvents.
		if msg.Label != "" {
			b.appendReadyMessage(&ClickEvent{
				MessageID: messageID,
				Label: msg.Label,
				HesitationTime: msg.HesitationTime,
				Timestamp: b.timestamp,
			})
		}
	case *RawErrorEvent:
		// Carries its own timestamp (integration errors may arrive late).
		b.appendReadyMessage(&ErrorEvent{
			MessageID: messageID,
			Timestamp: msg.Timestamp,
			Source: msg.Source,
			Name: msg.Name,
			Message: msg.Message,
			Payload: msg.Payload,
		})
	case *JSException:
		b.appendReadyMessage(&ErrorEvent{
			MessageID: messageID,
			Timestamp: b.timestamp,
			Source: "js_exception",
			Name: msg.Name,
			Message: msg.Message,
			Payload: msg.Payload,
		})
	case *ResourceTiming:
		tp := getResourceType(msg.Initiator, msg.URL)
		// A zero duration is treated as a failed load — TODO confirm.
		success := msg.Duration != 0
		b.appendReadyMessage(&ResourceEvent{
			MessageID: messageID,
			Timestamp: msg.Timestamp,
			Duration: msg.Duration,
			TTFB: msg.TTFB,
			HeaderSize: msg.HeaderSize,
			EncodedBodySize: msg.EncodedBodySize,
			DecodedBodySize: msg.DecodedBodySize,
			URL: msg.URL,
			Type: tp,
			Success: success,
		})
		// A failed fetch additionally raises a "bad_request" issue.
		if !success && tp == "fetch" {
			b.appendReadyMessage(&IssueEvent{
				Type: "bad_request",
				MessageID: messageID,
				Timestamp: msg.Timestamp,
				ContextString: msg.URL,
				Context: "",
				Payload: "",
			})
		}
	case *RawCustomEvent:
		b.appendReadyMessage(&CustomEvent{
			MessageID: messageID,
			Timestamp: b.timestamp,
			Name: msg.Name,
			Payload: msg.Payload,
		})
	case *CustomIssue:
		b.appendReadyMessage(&IssueEvent{
			Type: "custom",
			Timestamp: b.timestamp,
			MessageID: messageID,
			ContextString: msg.Name,
			Payload: msg.Payload,
		})
	case *Fetch:
		b.appendReadyMessage(&ResourceEvent{
			MessageID: messageID,
			Timestamp: msg.Timestamp,
			Duration: msg.Duration,
			URL: msg.URL,
			Type: "fetch",
			Success: msg.Status < 300, // 3xx and above count as failures here.
			Method: msg.Method,
			Status: msg.Status,
		})
	case *StateAction:
		b.appendReadyMessage(&StateActionEvent{
			MessageID: messageID,
			Timestamp: b.timestamp,
			Type: msg.Type,
		})
	case *GraphQL:
		b.appendReadyMessage(&GraphQLEvent{
			MessageID: messageID,
			Timestamp: b.timestamp,
			Name: msg.OperationName,
		})
	case *CreateElementNode,
		*CreateTextNode:
		// Node creation cancels a suspected DOM drop (DOM is being rebuilt).
		b.ddDetector.HandleNodeCreation()
	case *RemoveNode:
		b.ddDetector.HandleNodeRemoval(b.timestamp)
	case *CreateDocument:
		// A fresh document finalizes any accumulated drop evidence.
		if rm := b.ddDetector.Build(); rm != nil {
			b.appendReadyMessage(rm)
		}
	}
	// Every message may count as a DOM reaction for the dead-click detector.
	if rm := b.dcDetector.HandleMessage(message, messageID, b.timestamp); rm != nil {
		b.appendReadyMessage(rm)
	}
}
|
||||
|
||||
|
||||
// checkTimeouts flushes sub-builders whose pending events timed out at
// operating time ts (ms) and reports whether the whole session timed out
// (no new messages for EVENTS_SESSION_END_TIMEOUT). On session end it
// finalizes every finder/detector and queues a SessionEnd message.
func (b *builder) checkTimeouts(ts int64) bool {
	if b.timestamp == 0 {
		return false // There was no timestamp events yet
	}

	// Flush partially-built events that waited too long for completion.
	if b.peBuilder.HasInstance() && int64(b.peBuilder.GetTimestamp())+intervals.EVENTS_PAGE_EVENT_TIMEOUT < ts {
		b.buildPageEvent()
	}
	if b.ieBuilder.HasInstance() && int64(b.ieBuilder.GetTimestamp())+intervals.EVENTS_INPUT_EVENT_TIMEOUT < ts {
		b.buildInputEvent()
	}
	if b.ptaBuilder.HasInstance() && int64(b.ptaBuilder.GetStartTimestamp())+intervals.EVENTS_PERFORMANCE_AGGREGATION_TIMEOUT < ts {
		b.buildPerformanceTrackAggr()
	}

	lastTsGap := ts - int64(b.timestamp)
	//log.Printf("checking timeouts for sess %v: %v now, %v sesstime; gap %v",b.sid, ts, b.timestamp, lastTsGap)
	if lastTsGap > intervals.EVENTS_SESSION_END_TIMEOUT {
		// Session is over: flush everything that can still produce an event.
		if rm := b.ddDetector.Build(); rm != nil {
			b.appendReadyMessage(rm)
		}
		if rm := b.ciFinder.Build(); rm != nil {
			b.appendReadyMessage(rm)
		}
		if rm := b.miFinder.Build(); rm != nil {
			b.appendReadyMessage(rm)
		}
		if rm := b.crDetector.Build(); rm != nil {
			b.appendReadyMessage(rm)
		}
		if rm := b.dcDetector.HandleReaction(b.timestamp); rm != nil {
			b.appendReadyMessage(rm)
		}
		b.buildSessionEnd()
		return true
	}
	return false
}
|
||||
52
backend/services/ender/builder/builderMap.go
Normal file
52
backend/services/ender/builder/builderMap.go
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
// builderMap holds the per-session event builders, keyed by session ID.
type builderMap map[uint64]*builder
|
||||
|
||||
|
||||
func NewBuilderMap() builderMap {
|
||||
return make(builderMap)
|
||||
}
|
||||
|
||||
func (m builderMap) GetBuilder(sessionID uint64) *builder {
|
||||
b := m[sessionID]
|
||||
if b == nil {
|
||||
b = NewBuilder()
|
||||
m[sessionID] = b
|
||||
b.sid = sessionID
|
||||
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (m builderMap) HandleMessage(sessionID uint64, msg Message, messageID uint64) {
|
||||
b := m.GetBuilder(sessionID)
|
||||
b.handleMessage(msg, messageID)
|
||||
}
|
||||
|
||||
func (m builderMap) IterateSessionReadyMessages(sessionID uint64, operatingTs int64, iter func(msg Message)) {
|
||||
b, ok := m[ sessionID ]
|
||||
if !ok { return }
|
||||
sessionEnded := b.checkTimeouts(operatingTs)
|
||||
b.iterateReadyMessage(iter)
|
||||
if sessionEnded {
|
||||
delete(m, sessionID)
|
||||
}
|
||||
}
|
||||
|
||||
func (m builderMap) IterateReadyMessages(operatingTs int64, iter func(sessionID uint64, msg Message)) {
|
||||
for sessionID, b := range m {
|
||||
sessionEnded := b.checkTimeouts(operatingTs)
|
||||
b.iterateReadyMessage(func(msg Message) {
|
||||
iter(sessionID, msg)
|
||||
})
|
||||
if sessionEnded {
|
||||
delete(m, sessionID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
57
backend/services/ender/builder/clikRageDetector.go
Normal file
57
backend/services/ender/builder/clikRageDetector.go
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
// Intended maximum gap (ms) between two clicks of the same streak.
const CLICK_TIME_DIFF = 200
// Minimum streak length that qualifies as click rage.
const MIN_CLICKS_IN_A_ROW = 3

// clickRageDetector tracks streaks of repeated clicks on the same label
// and produces a "click_rage" issue when a streak gets long enough.
type clickRageDetector struct {
	lastTimestamp        uint64 // Timestamp of the most recent click in the streak.
	lastLabel            string // Label the current streak is attached to.
	firstInARawTimestamp uint64 // Timestamp of the streak's first click.
	firstInARawMessageId uint64 // Message ID of the streak's first click.
	countsInARow         int    // Number of clicks in the current streak.
}
|
||||
|
||||
|
||||
// Build finalizes the current click streak. It returns a "click_rage"
// IssueEvent (payload: JSON {"Count": n}) when the streak reached
// MIN_CLICKS_IN_A_ROW, nil otherwise. All streak state is reset.
func (crd *clickRageDetector) Build() *IssueEvent {
	var issue *IssueEvent
	if crd.countsInARow >= MIN_CLICKS_IN_A_ROW {
		payload, _ := json.Marshal(struct{ Count int }{crd.countsInARow}) // TODO: json encoder
		issue = &IssueEvent{
			Type:          "click_rage",
			ContextString: crd.lastLabel,
			Payload:       string(payload),
			Timestamp:     crd.firstInARawTimestamp,
			MessageID:     crd.firstInARawMessageId,
		}
	}
	*crd = clickRageDetector{}
	return issue
}
|
||||
|
||||
func (crd *clickRageDetector) HandleMouseClick(msg *MouseClick, messageID uint64, timestamp uint64) *IssueEvent {
|
||||
if crd.lastTimestamp + CLICK_TIME_DIFF < timestamp && crd.lastLabel == msg.Label {
|
||||
crd.lastTimestamp = timestamp
|
||||
crd.countsInARow += 1
|
||||
return nil
|
||||
}
|
||||
i := crd.Build()
|
||||
if msg.Label != "" {
|
||||
crd.lastTimestamp = timestamp
|
||||
crd.lastLabel = msg.Label
|
||||
crd.firstInARawTimestamp = timestamp
|
||||
crd.firstInARawMessageId = messageID
|
||||
crd.countsInARow = 1
|
||||
}
|
||||
return i
|
||||
}
|
||||
86
backend/services/ender/builder/cpuIssueFinder.go
Normal file
86
backend/services/ender/builder/cpuIssueFinder.go
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"openreplay/backend/pkg/messages/performance"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
const CPU_THRESHOLD = 70 // % out of 100
const CPU_MIN_DURATION_TRIGGER = 6 * 1000 // ms; shorter overload periods are ignored


// cpuIssueFinder reports a "cpu" issue when the estimated CPU rate stays
// at or above CPU_THRESHOLD for at least CPU_MIN_DURATION_TRIGGER ms.
type cpuIssueFinder struct {
	startTimestamp uint64 // When the current overload period started; 0 = none.
	startMessageID uint64 // Message ID at the start of the period.
	lastTimestamp  uint64 // Timestamp of the last PerformanceTrack handled.
	maxRate        uint64 // Peak CPU rate observed during the period.
	contextString  string // Current page URL, used as issue context.
}
|
||||
|
||||
// Build finalizes the current CPU-overload period. It returns a "cpu"
// IssueEvent (payload: JSON {Duration, Rate}) when the period lasted at
// least CPU_MIN_DURATION_TRIGGER ms, nil otherwise. Period state is reset
// either way; contextString is kept (it tracks the page, not the period).
func (f *cpuIssueFinder) Build() *IssueEvent {
	if f.startTimestamp == 0 {
		return nil
	}
	duration := f.lastTimestamp - f.startTimestamp
	timestamp := f.startTimestamp
	messageID := f.startMessageID
	maxRate := f.maxRate

	// Reset before the duration check so a too-short period is dropped too.
	f.startTimestamp = 0
	f.startMessageID = 0
	f.maxRate = 0
	if duration < CPU_MIN_DURATION_TRIGGER {
		return nil
	}

	payload, _ := json.Marshal(struct{
		Duration uint64
		Rate uint64
	}{duration,maxRate})
	return &IssueEvent{
		Type: "cpu",
		Timestamp: timestamp,
		MessageID: messageID,
		ContextString: f.contextString,
		Payload: string(payload),
	}
}
|
||||
|
||||
// HandleSetPageLocation records the current page URL as issue context.
func (f *cpuIssueFinder) HandleSetPageLocation(msg *SetPageLocation) {
	f.contextString = msg.URL
}
|
||||
|
||||
|
||||
|
||||
// HandlePerformanceTrack feeds one sample into the overload tracker. It may
// return a finished "cpu" IssueEvent when the sample ends an overload period:
// either the rate dropped below the threshold, or the tab became hidden
// (signalled by Frames/Ticks == -1).
func (f *cpuIssueFinder) HandlePerformanceTrack(msg *PerformanceTrack, messageID uint64, timestamp uint64) *IssueEvent {
	// performance.TimeDiff presumably returns 0 for non-increasing
	// timestamps (including the very first sample) — TODO confirm.
	dt := performance.TimeDiff(timestamp, f.lastTimestamp)
	if dt == 0 {
		return nil // TODO: handle error
	}

	f.lastTimestamp = timestamp

	if msg.Frames == -1 || msg.Ticks == -1 {
		// Tab hidden: close whatever period was open.
		return f.Build()
	}

	cpuRate := performance.CPURate(msg.Ticks, dt)

	if cpuRate >= CPU_THRESHOLD {
		// Open a period on the first over-threshold sample; track the peak.
		if f.startTimestamp == 0 {
			f.startTimestamp = timestamp
			f.startMessageID = messageID
		}
		if f.maxRate < cpuRate {
			f.maxRate = cpuRate
		}
	} else {
		return f.Build()
	}

	return nil
}
|
||||
|
||||
|
||||
55
backend/services/ender/builder/deadClickDetector.go
Normal file
55
backend/services/ender/builder/deadClickDetector.go
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
// Time (ms) within which the DOM is expected to react to a click.
const CLICK_RELATION_TIME = 1400

// deadClickDetector reports clicks that caused no DOM/CSS reaction within
// CLICK_RELATION_TIME ms as "dead_click" issues.
type deadClickDetector struct {
	lastMouseClick *MouseClick // Click currently awaiting a reaction; nil if none.
	lastTimestamp  uint64      // Timestamp of that click.
	lastMessageID  uint64      // Message ID of that click.
}
|
||||
|
||||
|
||||
func (d *deadClickDetector) HandleReaction(timestamp uint64) *IssueEvent {
|
||||
var i *IssueEvent
|
||||
if d.lastMouseClick != nil && d.lastTimestamp + CLICK_RELATION_TIME < timestamp {
|
||||
i = &IssueEvent{
|
||||
Type: "dead_click",
|
||||
ContextString: d.lastMouseClick.Label,
|
||||
Timestamp: d.lastTimestamp,
|
||||
MessageID: d.lastMessageID,
|
||||
}
|
||||
}
|
||||
d.lastMouseClick = nil
|
||||
d.lastTimestamp = 0
|
||||
d.lastMessageID = 0
|
||||
return i
|
||||
}
|
||||
|
||||
// HandleMessage inspects one message for the dead-click heuristic: a click
// starts a waiting period, and any DOM/CSS mutation counts as a reaction.
// Returns a "dead_click" IssueEvent when a previous click is resolved dead.
func (d *deadClickDetector) HandleMessage(msg Message, messageID uint64, timestamp uint64) *IssueEvent {
	var i *IssueEvent
	switch m := msg.(type) {
	case *MouseClick:
		// A new click first resolves the previous one.
		i = d.HandleReaction(timestamp)
		d.lastMouseClick = m
		d.lastTimestamp = timestamp
		d.lastMessageID = messageID
	// Any DOM or CSS mutation counts as a reaction to the pending click.
	case *SetNodeAttribute,
		*RemoveNodeAttribute,
		*CreateElementNode,
		*CreateTextNode,
		*MoveNode,
		*RemoveNode,
		*SetCSSData,
		*CSSInsertRule,
		*CSSDeleteRule:
		i = d.HandleReaction(timestamp)
	}
	return i
}
|
||||
|
||||
|
||||
42
backend/services/ender/builder/domDropDetector.go
Normal file
42
backend/services/ender/builder/domDropDetector.go
Normal file
|
|
@ -0,0 +1,42 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
// domDropDetector watches for bursts of node removals with no subsequent
// creation — a signal that the page's DOM was dropped (e.g. on a crash).
type domDropDetector struct {
	removedCount      int    // Removals within the current DROP_WINDOW burst.
	lastDropTimestamp uint64 // Timestamp of the most recent removal.
}

const DROP_WINDOW = 200 //ms
const CRITICAL_COUNT = 1 // Our login page contains 20. But on crush it removes only roots (1-3 nodes).
|
||||
|
||||
// HandleNodeCreation resets the drop tracker: a node creation after
// removals means the DOM is being rebuilt, not dropped.
func (dd *domDropDetector) HandleNodeCreation() {
	dd.removedCount = 0
	dd.lastDropTimestamp = 0
}
|
||||
|
||||
func (dd *domDropDetector) HandleNodeRemoval(ts uint64) {
|
||||
if dd.lastDropTimestamp + DROP_WINDOW > ts {
|
||||
dd.removedCount += 1
|
||||
} else {
|
||||
dd.removedCount = 1
|
||||
}
|
||||
dd.lastDropTimestamp = ts
|
||||
}
|
||||
|
||||
|
||||
func (dd *domDropDetector) Build() *DOMDrop {
|
||||
var domDrop *DOMDrop
|
||||
if dd.removedCount >= CRITICAL_COUNT {
|
||||
domDrop = &DOMDrop{
|
||||
Timestamp: dd.lastDropTimestamp,
|
||||
}
|
||||
}
|
||||
dd.removedCount = 0
|
||||
dd.lastDropTimestamp = 0
|
||||
return domDrop
|
||||
}
|
||||
|
||||
80
backend/services/ender/builder/inputEventBuilder.go
Normal file
80
backend/services/ender/builder/inputEventBuilder.go
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
// inputLabels maps an input node ID to its latest known label.
type inputLabels map[uint64]string

// inputEventBuilder merges consecutive SetInputTarget/SetInputValue
// messages for a single input node into one InputEvent.
type inputEventBuilder struct {
	inputEvent  *InputEvent // Event under construction; nil when none.
	inputLabels inputLabels // Known labels, kept across events until navigation.
	inputID     uint64      // Node ID the pending event belongs to.
}
|
||||
|
||||
func NewInputEventBuilder() *inputEventBuilder {
|
||||
ieBuilder := &inputEventBuilder{}
|
||||
ieBuilder.ClearLabels()
|
||||
return ieBuilder
|
||||
}
|
||||
|
||||
|
||||
// ClearLabels forgets all known input labels (called on page navigation,
// since node IDs restart with the new DOM).
func (b *inputEventBuilder) ClearLabels() {
	b.inputLabels = make(inputLabels)
}
|
||||
|
||||
func (b *inputEventBuilder) HandleSetInputTarget(msg *SetInputTarget) *InputEvent {
|
||||
var inputEvent *InputEvent
|
||||
if b.inputID != msg.ID {
|
||||
inputEvent = b.Build()
|
||||
b.inputID = msg.ID
|
||||
}
|
||||
b.inputLabels[msg.ID] = msg.Label
|
||||
return inputEvent
|
||||
}
|
||||
|
||||
func (b *inputEventBuilder) HandleSetInputValue(msg *SetInputValue, messageID uint64, timestamp uint64) *InputEvent {
|
||||
var inputEvent *InputEvent
|
||||
if b.inputID != msg.ID {
|
||||
inputEvent = b.Build()
|
||||
b.inputID = msg.ID
|
||||
}
|
||||
if b.inputEvent == nil {
|
||||
b.inputEvent = &InputEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
Value: msg.Value,
|
||||
ValueMasked: msg.Mask > 0,
|
||||
}
|
||||
} else {
|
||||
b.inputEvent.Value = msg.Value
|
||||
b.inputEvent.ValueMasked = msg.Mask > 0
|
||||
}
|
||||
return inputEvent
|
||||
}
|
||||
|
||||
// HasInstance reports whether an InputEvent is currently under construction.
func (b *inputEventBuilder) HasInstance() bool {
	return b.inputEvent != nil
}
|
||||
|
||||
func (b * inputEventBuilder) GetTimestamp() uint64 {
|
||||
if b.inputEvent == nil {
|
||||
return 0
|
||||
}
|
||||
return b.inputEvent.Timestamp;
|
||||
}
|
||||
|
||||
func (b *inputEventBuilder) Build() *InputEvent {
|
||||
if b.inputEvent == nil {
|
||||
return nil
|
||||
}
|
||||
inputEvent := b.inputEvent
|
||||
label := b.inputLabels[b.inputID]
|
||||
// if !ok {
|
||||
// return nil
|
||||
// }
|
||||
inputEvent.Label = label
|
||||
|
||||
b.inputEvent = nil
|
||||
return inputEvent
|
||||
}
|
||||
72
backend/services/ender/builder/memoryIssueFinder.go
Normal file
72
backend/services/ender/builder/memoryIssueFinder.go
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
"math"
|
||||
"encoding/json"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
const MIN_COUNT = 3 // Samples needed before the running average is trusted.
const MEM_RATE_THRESHOLD = 300 // % to average

// memoryIssueFinder reports a "memory" issue when UsedJSHeapSize jumps to
// MEM_RATE_THRESHOLD percent (or more) of the running average.
type memoryIssueFinder struct {
	startMessageID uint64  // Message ID at the start of the current spike.
	startTimestamp uint64  // When the current spike started; 0 = none.
	rate           int     // Peak rate (% of average) seen during the spike.
	count          float64 // Number of samples in the running average.
	sum            float64 // Sum of UsedJSHeapSize over all samples.
	contextString  string  // Current page URL, used as issue context.
}
|
||||
|
||||
// Build finalizes the current memory spike, returning a "memory"
// IssueEvent whose payload carries the peak rate above the average
// (in percentage points). Returns nil when no spike is in progress.
// NOTE(review): sum/count are not reset here, so the running average
// keeps accumulating across spikes — looks intentional (long-lived
// baseline), but worth confirming.
func (f *memoryIssueFinder) Build() *IssueEvent {
	if f.startTimestamp == 0 {
		return nil
	}
	payload, _ := json.Marshal(struct{Rate int }{f.rate - 100,})
	i := &IssueEvent{
		Type: "memory",
		Timestamp: f.startTimestamp,
		MessageID: f.startMessageID,
		ContextString: f.contextString,
		Payload: string(payload),
	}
	f.startTimestamp = 0
	f.startMessageID = 0
	f.rate = 0
	return i
}
|
||||
|
||||
// HandleSetPageLocation records the current page URL as issue context.
func (f *memoryIssueFinder) HandleSetPageLocation(msg *SetPageLocation) {
	f.contextString = msg.URL
}
|
||||
|
||||
// HandlePerformanceTrack feeds one heap-size sample into the spike tracker.
// The first MIN_COUNT samples only seed the running average. Afterwards a
// sample at >= MEM_RATE_THRESHOLD % of the average opens (or extends) a
// spike, while a sample below it closes the spike and may return the issue.
func (f *memoryIssueFinder) HandlePerformanceTrack(msg *PerformanceTrack, messageID uint64, timestamp uint64) *IssueEvent {
	if f.count < MIN_COUNT {
		f.sum += float64(msg.UsedJSHeapSize)
		f.count++
		return nil
	}

	average := f.sum/f.count
	rate := int(math.Round(float64(msg.UsedJSHeapSize)/average * 100))

	// The sample joins the running average regardless (spikes included).
	f.sum += float64(msg.UsedJSHeapSize)
	f.count++

	if rate >= MEM_RATE_THRESHOLD {
		if f.startTimestamp == 0 {
			f.startTimestamp = timestamp
			f.startMessageID = messageID
		}
		if f.rate < rate {
			f.rate = rate
		}
	} else {
		return f.Build()
	}

	return nil
}
|
||||
|
||||
|
||||
91
backend/services/ender/builder/pageEventBuilder.go
Normal file
91
backend/services/ender/builder/pageEventBuilder.go
Normal file
|
|
@ -0,0 +1,91 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
// pageEventBuilder assembles a PageEvent from a SetPageLocation plus the
// two timing messages (PageLoadTiming and PageRenderTiming) that follow.
type pageEventBuilder struct {
	pageEvent          *PageEvent // Event under construction; nil when none.
	firstTimingHandled bool       // True once one of the two timing messages arrived.
}
|
||||
|
||||
func (b *pageEventBuilder) buildIfTimingsComplete() *PageEvent {
|
||||
if b.firstTimingHandled {
|
||||
return b.Build()
|
||||
}
|
||||
b.firstTimingHandled = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only for Loaded: true
|
||||
// HandleSetPageLocation starts building a new PageEvent for a real page
// load (only called with Loaded: true; SPA-style location changes are
// emitted directly by the caller with Loaded: false).
func (b *pageEventBuilder) HandleSetPageLocation(msg *SetPageLocation, messageID uint64, timestamp uint64) {
	b.pageEvent = &PageEvent{
		URL: msg.URL,
		Referrer: msg.Referrer,
		Loaded: true,
		MessageID: messageID,
		Timestamp: timestamp,
	}
}
|
||||
|
||||
// HandlePageLoadTiming copies load-timing metrics into the pending
// PageEvent. Values above 30000 are skipped — presumably a 30 s sanity
// cap on timing outliers (TODO confirm the threshold's origin). May
// return the finished PageEvent once both timing messages have arrived.
func (b * pageEventBuilder) HandlePageLoadTiming(msg *PageLoadTiming) *PageEvent {
	if !b.HasInstance() {
		return nil
	}
	if msg.RequestStart <= 30000 {
		b.pageEvent.RequestStart = msg.RequestStart
	}
	if msg.ResponseStart <= 30000 {
		b.pageEvent.ResponseStart = msg.ResponseStart
	}
	if msg.ResponseEnd <= 30000 {
		b.pageEvent.ResponseEnd = msg.ResponseEnd
	}
	if msg.DomContentLoadedEventStart <= 30000 {
		b.pageEvent.DomContentLoadedEventStart = msg.DomContentLoadedEventStart
	}
	if msg.DomContentLoadedEventEnd <= 30000 {
		b.pageEvent.DomContentLoadedEventEnd = msg.DomContentLoadedEventEnd
	}
	if msg.LoadEventStart <= 30000 {
		b.pageEvent.LoadEventStart = msg.LoadEventStart
	}
	if msg.LoadEventEnd <= 30000 {
		b.pageEvent.LoadEventEnd = msg.LoadEventEnd
	}
	if msg.FirstPaint <= 30000 {
		b.pageEvent.FirstPaint = msg.FirstPaint
	}
	if msg.FirstContentfulPaint <= 30000 {
		b.pageEvent.FirstContentfulPaint = msg.FirstContentfulPaint
	}
	return b.buildIfTimingsComplete()
}
|
||||
|
||||
// HandlePageRenderTiming copies render metrics into the pending PageEvent.
// May return the finished PageEvent once both timing messages arrived.
func (b * pageEventBuilder) HandlePageRenderTiming(msg *PageRenderTiming) *PageEvent {
	if !b.HasInstance() {
		return nil
	}
	b.pageEvent.SpeedIndex = msg.SpeedIndex
	b.pageEvent.VisuallyComplete = msg.VisuallyComplete
	b.pageEvent.TimeToInteractive = msg.TimeToInteractive
	return b.buildIfTimingsComplete()
}
|
||||
|
||||
// HasInstance reports whether a PageEvent is currently under construction.
func (b *pageEventBuilder) HasInstance() bool {
	return b.pageEvent != nil
}
|
||||
|
||||
func (b * pageEventBuilder) GetTimestamp() uint64 {
|
||||
if b.pageEvent == nil {
|
||||
return 0
|
||||
}
|
||||
return b.pageEvent.Timestamp;
|
||||
}
|
||||
|
||||
func (b * pageEventBuilder) Build() *PageEvent {
|
||||
pageEvent := b.pageEvent
|
||||
b.pageEvent = nil
|
||||
b.firstTimingHandled = false
|
||||
return pageEvent
|
||||
}
|
||||
109
backend/services/ender/builder/performanceTrackAggrBuilder.go
Normal file
109
backend/services/ender/builder/performanceTrackAggrBuilder.go
Normal file
|
|
@ -0,0 +1,109 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
"openreplay/backend/pkg/messages/performance"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
// performanceTrackAggrBuilder folds consecutive PerformanceTrack samples
// into a single PerformanceTrackAggr (min/max/avg FPS, CPU and heap sizes)
// over one aggregation window.
type performanceTrackAggrBuilder struct {
	performanceTrackAggr *PerformanceTrackAggr // Aggregate under construction; nil when none.
	lastTimestamp        uint64                // Timestamp of the last sample handled.
	count                float64               // Number of samples in the window.
	sumFrameRate         float64               // Sums for the averages computed in Build.
	sumTickRate          float64
	sumTotalJSHeapSize   float64
	sumUsedJSHeapSize    float64
}
|
||||
|
||||
|
||||
func (b *performanceTrackAggrBuilder) start(timestamp uint64) {
|
||||
b.performanceTrackAggr = &PerformanceTrackAggr{
|
||||
TimestampStart: timestamp,
|
||||
}
|
||||
b.lastTimestamp = timestamp
|
||||
}
|
||||
|
||||
// HandlePerformanceTrack folds one sample into the current aggregation
// window, maintaining min/max values and sums for the averages. A hidden
// tab (Frames/Ticks == -1) or a missing window finalizes the current
// window (possibly returning it) and starts a new one.
func (b *performanceTrackAggrBuilder) HandlePerformanceTrack(msg *PerformanceTrack, timestamp uint64) *PerformanceTrackAggr {
	if msg.Frames == -1 || msg.Ticks == -1 || !b.HasInstance() {
		performanceTrackAggr := b.Build()
		b.start(timestamp)
		return performanceTrackAggr
	}

	// performance.TimeDiff presumably returns 0 for non-increasing
	// timestamps — TODO confirm.
	dt := performance.TimeDiff(timestamp, b.lastTimestamp)
	if dt == 0 {
		return nil // TODO: handle error
	}

	frameRate := performance.FrameRate(msg.Frames, dt)
	tickRate := performance.TickRate(msg.Ticks, dt)

	fps := uint64(math.Round(frameRate))
	cpu := performance.CPURateFromTickRate(tickRate)
	// A zero Min* value means "not yet initialized".
	if fps < b.performanceTrackAggr.MinFPS || b.performanceTrackAggr.MinFPS == 0 {
		b.performanceTrackAggr.MinFPS = fps
	}
	if fps > b.performanceTrackAggr.MaxFPS {
		b.performanceTrackAggr.MaxFPS = fps
	}
	if cpu < b.performanceTrackAggr.MinCPU || b.performanceTrackAggr.MinCPU == 0 {
		b.performanceTrackAggr.MinCPU = cpu
	}
	if cpu > b.performanceTrackAggr.MaxCPU {
		b.performanceTrackAggr.MaxCPU = cpu
	}
	if msg.TotalJSHeapSize < b.performanceTrackAggr.MinTotalJSHeapSize || b.performanceTrackAggr.MinTotalJSHeapSize == 0 {
		b.performanceTrackAggr.MinTotalJSHeapSize = msg.TotalJSHeapSize
	}
	if msg.TotalJSHeapSize > b.performanceTrackAggr.MaxTotalJSHeapSize {
		b.performanceTrackAggr.MaxTotalJSHeapSize = msg.TotalJSHeapSize
	}
	if msg.UsedJSHeapSize < b.performanceTrackAggr.MinUsedJSHeapSize || b.performanceTrackAggr.MinUsedJSHeapSize == 0 {
		b.performanceTrackAggr.MinUsedJSHeapSize = msg.UsedJSHeapSize
	}
	if msg.UsedJSHeapSize > b.performanceTrackAggr.MaxUsedJSHeapSize {
		b.performanceTrackAggr.MaxUsedJSHeapSize = msg.UsedJSHeapSize
	}
	b.sumFrameRate += frameRate
	b.sumTickRate += tickRate
	b.sumTotalJSHeapSize += float64(msg.TotalJSHeapSize)
	b.sumUsedJSHeapSize += float64(msg.UsedJSHeapSize)
	b.count += 1
	b.lastTimestamp = timestamp
	return nil
}
|
||||
|
||||
// HasInstance reports whether an aggregation window is currently open.
func (b *performanceTrackAggrBuilder) HasInstance() bool {
	return b.performanceTrackAggr != nil
}
|
||||
|
||||
func (b *performanceTrackAggrBuilder) GetStartTimestamp() uint64 {
|
||||
if b.performanceTrackAggr == nil {
|
||||
return 0
|
||||
}
|
||||
return b.performanceTrackAggr.TimestampStart;
|
||||
}
|
||||
|
||||
// Build finalizes the aggregation window and resets the builder. It
// returns nil unless the window holds at least one sample and spans a
// non-zero time range. AvgCPU is 100 minus the average tick rate in %
// — a high tick rate presumably indicates an idle CPU (TODO confirm
// against the performance package).
func (b *performanceTrackAggrBuilder) Build() *PerformanceTrackAggr {
	var performanceTrackAggr *PerformanceTrackAggr
	if b.HasInstance() && b.GetStartTimestamp() != b.lastTimestamp && b.count != 0 {
		performanceTrackAggr = b.performanceTrackAggr
		performanceTrackAggr.TimestampEnd = b.lastTimestamp
		performanceTrackAggr.AvgFPS = uint64(math.Round(b.sumFrameRate / b.count))
		performanceTrackAggr.AvgCPU = 100 - uint64(math.Round(b.sumTickRate*100/b.count))
		performanceTrackAggr.AvgTotalJSHeapSize = uint64(math.Round(b.sumTotalJSHeapSize / b.count))
		performanceTrackAggr.AvgUsedJSHeapSize = uint64(math.Round(b.sumUsedJSHeapSize / b.count))
	}
	b.performanceTrackAggr = nil
	b.count = 0
	b.sumFrameRate = 0
	b.sumTickRate = 0
	b.sumTotalJSHeapSize = 0
	b.sumUsedJSHeapSize = 0
	b.lastTimestamp = 0
	return performanceTrackAggr
}
|
||||
|
||||
72
backend/services/ender/main.go
Normal file
72
backend/services/ender/main.go
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/intervals"
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/services/ender/builder"
|
||||
)
|
||||
|
||||
|
||||
// main runs the "ender" service: it consumes raw tracker messages from the
// queue, feeds them into per-session event builders, and on a fixed tick
// flushes every completed event to the trigger topic.
func main() {
	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

	GROUP_EVENTS := env.String("GROUP_ENDER")
	TOPIC_TRIGGER := env.String("TOPIC_TRIGGER")

	builderMap := builder.NewBuilderMap()
	// lastTs tracks the queue timestamp of the most recent message.
	// NOTE(review): it is only read by the commented-out per-session flush
	// below — confirm whether it can be removed.
	var lastTs int64 = 0

	producer := queue.NewProducer()
	consumer := queue.NewMessageConsumer(
		GROUP_EVENTS,
		[]string{
			env.String("TOPIC_RAW"),
		},
		func(sessionID uint64, msg messages.Message, meta *types.Meta) {
			lastTs = meta.Timestamp
			builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
			// builderMap.IterateSessionReadyMessages(sessionID, lastTs, func(readyMsg messages.Message) {
			// 	producer.Produce(TOPIC_TRIGGER, sessionID, messages.Encode(readyMsg))
			// })
		},
	)
	// Offsets are committed manually (see CommitBack below), presumably so
	// that not-yet-flushed messages are re-read after a restart — confirm.
	consumer.DisableAutoCommit()

	tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	for {
		select {
		case sig := <-sigchan:
			// Graceful shutdown: flush the producer, commit offsets minus
			// the safety gap, then exit.
			log.Printf("Caught signal %v: terminating\n", sig)
			producer.Close(2000)
			consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP)
			consumer.Close()
			os.Exit(0)
		case <- tick:
			// Periodic flush: emit every event completed by "now" (ms).
			builderMap.IterateReadyMessages(time.Now().UnixNano()/1e6, func(sessionID uint64, readyMsg messages.Message) {
				producer.Produce(TOPIC_TRIGGER, sessionID, messages.Encode(readyMsg))
			})
			// TODO: why exactly do we need Flush here and not in any other place?
			producer.Flush(2000)
			consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP)
		default:
			// No signal and no tick pending: pull the next message.
			if err := consumer.ConsumeNext(); err != nil {
				log.Fatalf("Error on consuming: %v", err)
			}
		}
	}
}
|
||||
|
||||
72
backend/services/http/README.md
Normal file
72
backend/services/http/README.md
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
# HTTP Endpoints
|
||||
|
||||
## Start Mobile Session
|
||||
`POST` /v1/ios/start
|
||||
### Request
|
||||
`application/json`
|
||||
|
||||
```
|
||||
* - required
|
||||
{
|
||||
projectID* number // Encoded ProjectID
|
||||
trackerVersion* string // Tracker version string
|
||||
revID string // Set by user
|
||||
userUUID string // User ID, should be derived from local storage connected to user
|
||||
userOSVersion string
|
||||
userDevice string
|
||||
userDeviceType number //
|
||||
performance //
|
||||
}
|
||||
```
|
||||
|
||||
### Responce
|
||||
|
||||
```
|
||||
200 application/json
|
||||
{
|
||||
imagesHashList // list of most friquently used image hash strings
|
||||
token // Authorisation token to use in other requests (Bearer)
|
||||
userUUID // Should be stored in local storage
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
## Push message batch
|
||||
`POST` /v1/ios/append
|
||||
### Request
|
||||
`application/octet-stream`
|
||||
```
|
||||
<binary encoded messages>
|
||||
```
|
||||
### Responce
|
||||
200 OK
|
||||
|
||||
OR
|
||||
|
||||
401 Unauthorised - token timed out. Start new session
|
||||
|
||||
|
||||
## Push late message batch (after app reload - crashes etc.)
|
||||
`POST` /v1/ios/late
|
||||
### Request
|
||||
`application/octet-stream`
|
||||
```
|
||||
<binary encoded messages>
|
||||
```
|
||||
### Responce
|
||||
200 OK
|
||||
|
||||
|
||||
|
||||
## Push images
|
||||
`POST` /v1/ios/images
|
||||
|
||||
### Request
|
||||
`multipart/form-data` values:
|
||||
`projectID` [required] // Encoded ProjectID
|
||||
|
||||
Binary files with the hash-filename in the `filename` header each.
|
||||
|
||||
|
||||
### Responce
|
||||
200 OK
|
||||
20
backend/services/http/assets.go
Normal file
20
backend/services/http/assets.go
Normal file
|
|
@ -0,0 +1,20 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func sendAssetForCache(sessionID uint64, baseURL string, relativeURL string) {
|
||||
if fullURL, cachable := assets.GetFullCachableURL(baseURL, relativeURL); cachable {
|
||||
producer.Produce(topicTrigger, sessionID, messages.Encode(&messages.AssetCache{
|
||||
URL: fullURL,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
func sendAssetsForCacheFromCSS(sessionID uint64, baseURL string, css string) {
|
||||
for _, u := range assets.ExtractURLsFromCSS(css) { // TODO: in one shot with rewriting
|
||||
sendAssetForCache(sessionID, baseURL, u)
|
||||
}
|
||||
}
|
||||
41
backend/services/http/geoip/geoip.go
Normal file
41
backend/services/http/geoip/geoip.go
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
package geoip
|
||||
|
||||
import (
|
||||
"log"
|
||||
"net"
|
||||
|
||||
maxminddb "github.com/oschwald/maxminddb-golang"
|
||||
)
|
||||
|
||||
// geoIPRecord mirrors the subset of the MaxMind DB document this package
// reads: only the country's ISO code.
type geoIPRecord struct {
	Country struct {
		ISOCode string `maxminddb:"iso_code"`
	} `maxminddb:"country"`
}

// GeoIP wraps an open MaxMind database reader used for IP-to-country
// resolution.
type GeoIP struct {
	r *maxminddb.Reader
}
|
||||
|
||||
func NewGeoIP(file string) *GeoIP {
|
||||
r, err := maxminddb.Open(file)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
return &GeoIP{r}
|
||||
}
|
||||
|
||||
func (geoIP *GeoIP) ExtractISOCode(ip net.IP) string {
|
||||
if ip == nil {
|
||||
return "UN"
|
||||
}
|
||||
var code string
|
||||
var record geoIPRecord
|
||||
if geoIP.r.Lookup(ip, &record) == nil {
|
||||
code = record.Country.ISOCode
|
||||
}
|
||||
if code == "" {
|
||||
code = "UN"
|
||||
}
|
||||
return code
|
||||
}
|
||||
13
backend/services/http/geoip/http.go
Normal file
13
backend/services/http/geoip/http.go
Normal file
|
|
@ -0,0 +1,13 @@
|
|||
package geoip
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/http"
|
||||
|
||||
"github.com/tomasen/realip"
|
||||
)
|
||||
|
||||
func (geoIP *GeoIP) ExtractISOCodeFromHTTPRequest(r *http.Request) string {
|
||||
ip := net.ParseIP(realip.FromRequest(r))
|
||||
return geoIP.ExtractISOCode(ip)
|
||||
}
|
||||
295
backend/services/http/handlers.go
Normal file
295
backend/services/http/handlers.go
Normal file
|
|
@ -0,0 +1,295 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"time"
|
||||
"log"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/token"
|
||||
)
|
||||
|
||||
// Request body size caps enforced with http.MaxBytesReader.
const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb
const BATCH_SIZE_LIMIT int64 = 1e6 // 1Mb
|
||||
|
||||
// startSessionHandlerWeb handles POST /v1/web/start. It validates the
// project key, applies the project's sampling rate, and either resumes the
// session identified by the supplied token or starts a new one (emitting a
// SessionStart message to the raw topic). The response carries the session
// token, the user UUID and the session ID.
func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
	type request struct {
		Token           string  `json:"token"`
		UserUUID        *string `json:"userUUID"`
		RevID           string  `json:"revID"`
		Timestamp       uint64  `json:"timestamp"`
		TrackerVersion  string  `json:"trackerVersion"`
		IsSnippet       bool    `json:"isSnippet"`
		DeviceMemory    uint64  `json:"deviceMemory"`
		JsHeapSizeLimit uint64  `json:"jsHeapSizeLimit"`
		ProjectKey      *string `json:"projectKey"`
	}
	type response struct {
		Timestamp int64  `json:"timestamp"`
		Delay     int64  `json:"delay"`
		Token     string `json:"token"`
		UserUUID  string `json:"userUUID"`
		SessionID string `json:"sessionID"`
	}

	startTime := time.Now()
	req := &request{}
	body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT) // what if Body == nil?? // use r.ContentLength to return specific error?
	//defer body.Close()
	if err := json.NewDecoder(body).Decode(req); err != nil {
		responseWithError(w, http.StatusBadRequest, err)
		return
	}

	if req.ProjectKey == nil {
		responseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
		return
	}

	p, err := pgconn.GetProjectByKey(*req.ProjectKey)
	if p == nil {
		if err == nil {
			responseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"))
		} else {
			responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
		}
		return
	}

	// Sampling: reject the session when the roll lands above the project's
	// configured sample rate.
	dice := byte(rand.Intn(100)) // [0, 100)
	if dice >= p.SampleRate {
		responseWithError(w, http.StatusForbidden, errors.New("cancel"))
		return
	}

	userUUID := getUUID(req.UserUUID)
	tokenData, err := tokenizer.Parse(req.Token)
	if err != nil { // Starting the new one
		ua := uaParser.ParseFromHTTPRequest(r)
		if ua == nil {
			responseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
			return
		}
		// The session ID embeds the start time (ms) via the flake generator.
		sessionID, err := flaker.Compose(uint64(startTime.UnixNano() / 1e6))
		if err != nil {
			responseWithError(w, http.StatusInternalServerError, err)
			return
		}
		// TODO: if EXPIRED => send message for two sessions association
		expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
		tokenData = &token.TokenData{sessionID, expTime.UnixNano() / 1e6}

		country := geoIP.ExtractISOCodeFromHTTPRequest(r)
		producer.Produce(topicRaw, tokenData.ID, Encode(&SessionStart{
			Timestamp:            req.Timestamp,
			ProjectID:            uint64(p.ProjectID),
			TrackerVersion:       req.TrackerVersion,
			RevID:                req.RevID,
			UserUUID:             userUUID,
			UserAgent:            r.Header.Get("User-Agent"),
			UserOS:               ua.OS,
			UserOSVersion:        ua.OSVersion,
			UserBrowser:          ua.Browser,
			UserBrowserVersion:   ua.BrowserVersion,
			UserDevice:           ua.Device,
			UserDeviceType:       ua.DeviceType,
			UserCountry:          country,
			UserDeviceMemorySize: req.DeviceMemory,
			UserDeviceHeapSize:   req.JsHeapSizeLimit,
		}))
	}

	//delayDuration := time.Now().Sub(startTime)
	responseWithJSON(w, &response{
		//Timestamp: startTime.UnixNano() / 1e6,
		//Delay: delayDuration.Nanoseconds() / 1e6,
		Token:     tokenizer.Compose(*tokenData),
		UserUUID:  userUUID,
		SessionID: strconv.FormatUint(tokenData.ID, 10),
	})
}
|
||||
|
||||
|
||||
|
||||
func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64) {
|
||||
body := http.MaxBytesReader(w, r.Body, BATCH_SIZE_LIMIT)
|
||||
//defer body.Close()
|
||||
var reader io.ReadCloser
|
||||
switch r.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
reader, err := gzip.NewReader(body)
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent responce
|
||||
return
|
||||
}
|
||||
defer reader.Close()
|
||||
default:
|
||||
reader = body
|
||||
}
|
||||
buf, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
return
|
||||
}
|
||||
producer.Produce(topicRaw, sessionID, buf) // What if not able to send?
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
// pushMessagesHandler authenticates the request by its session token and,
// on success, forwards the request body as a message batch for that
// session. Replies 401 when the token cannot be parsed.
func pushMessagesHandler(w http.ResponseWriter, r *http.Request) {
	sessionData, err := tokenizer.ParseFromHTTPRequest(r)
	if err != nil {
		responseWithError(w, http.StatusUnauthorized, err)
		return
	}
	pushMessages(w, r, sessionData.ID)
}
|
||||
|
||||
// pushMessagesSeparatelyHandler handles POST /v1/web/i. It authenticates
// the session token, rewrites the URL-based DOM/CSS messages in the batch
// so asset URLs point at the caching origin (scheduling each referenced
// asset for caching along the way), and forwards the rewritten batch to
// the raw topic.
func pushMessagesSeparatelyHandler(w http.ResponseWriter, r *http.Request) {
	sessionData, err := tokenizer.ParseFromHTTPRequest(r)
	if err != nil {
		responseWithError(w, http.StatusUnauthorized, err)
		return
	}
	body := http.MaxBytesReader(w, r.Body, BATCH_SIZE_LIMIT)
	//defer body.Close()
	buf, err := ioutil.ReadAll(body)
	if err != nil {
		responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
		return
	}
	//log.Printf("Sending batch...")
	//startTime := time.Now()

	// analyticsMessages := make([]Message, 0, 200)

	// Each *URLBased message is replaced by its plain counterpart with the
	// URL(s) rewritten to the assets origin; all other messages pass
	// through unchanged.
	rewritenBuf, err := RewriteBatch(buf, func(msg Message) Message {
		switch m := msg.(type) {
		case *SetNodeAttributeURLBased:
			if m.Name == "src" || m.Name == "href" {
				sendAssetForCache(sessionData.ID, m.BaseURL, m.Value)
				msg = &SetNodeAttribute{
					ID:    m.ID,
					Name:  m.Name,
					Value: rewriter.RewriteURL(sessionData.ID, m.BaseURL, m.Value),
				}
			} else if m.Name == "style" {
				sendAssetsForCacheFromCSS(sessionData.ID, m.BaseURL, m.Value)
				msg = &SetNodeAttribute{
					ID:    m.ID,
					Name:  m.Name,
					Value: rewriter.RewriteCSS(sessionData.ID, m.BaseURL, m.Value),
				}
			}
		case *SetCSSDataURLBased:
			sendAssetsForCacheFromCSS(sessionData.ID, m.BaseURL, m.Data)
			msg = &SetCSSData{
				ID:   m.ID,
				Data: rewriter.RewriteCSS(sessionData.ID, m.BaseURL, m.Data),
			}
		case *CSSInsertRuleURLBased:
			sendAssetsForCacheFromCSS(sessionData.ID, m.BaseURL, m.Rule)
			msg = &CSSInsertRule{
				ID:    m.ID,
				Index: m.Index,
				Rule:  rewriter.RewriteCSS(sessionData.ID, m.BaseURL, m.Rule),
			}
		}

		// switch msg.(type) {
		// case *BatchMeta, // TODO: watchout! Meta().Index'es are changed here (though it is still unique for the topic-session pair)
		// 	*SetPageLocation,
		// 	*PageLoadTiming,
		// 	*PageRenderTiming,
		// 	*PerformanceTrack,
		// 	*SetInputTarget,
		// 	*SetInputValue,
		// 	*MouseClick,
		// 	*RawErrorEvent,
		// 	*JSException,
		// 	*ResourceTiming,
		// 	*RawCustomEvent,
		// 	*CustomIssue,
		// 	*Fetch,
		// 	*StateAction,
		// 	*GraphQL,
		// 	*CreateElementNode,
		// 	*CreateTextNode,
		// 	*RemoveNode,
		// 	*CreateDocument,
		// 	*RemoveNodeAttribute,
		// 	*MoveNode,
		// 	*SetCSSData,
		// 	*CSSInsertRule,
		// 	*CSSDeleteRule:
		// 	analyticsMessages = append(analyticsMessages, msg)
		//}

		return msg
	})
	if err != nil {
		responseWithError(w, http.StatusForbidden, err)
		return
	}
	producer.Produce(topicRaw, sessionData.ID, rewritenBuf)
	//producer.Produce(topicAnalytics, sessionData.ID, WriteBatch(analyticsMessages))
	//duration := time.Now().Sub(startTime)
	//log.Printf("Sended batch within %v nsec; %v nsek/byte", duration.Nanoseconds(), duration.Nanoseconds()/int64(len(buf)))
	w.WriteHeader(http.StatusOK)
}
|
||||
|
||||
func notStartedHandler(w http.ResponseWriter, r *http.Request) {
|
||||
type request struct {
|
||||
ProjectKey *string `json:"projectKey"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
DoNotTrack bool `json:"DoNotTrack"`
|
||||
// RevID string `json:"revID"`
|
||||
}
|
||||
req := &request{}
|
||||
body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT)
|
||||
defer body.Close()
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
responseWithError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
if req.ProjectKey == nil {
|
||||
responseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
|
||||
return
|
||||
}
|
||||
ua := uaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway
|
||||
if ua == nil {
|
||||
responseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
country := geoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
err := pgconn.InsertUnstartedSession(postgres.UnstartedSession{
|
||||
ProjectKey: *req.ProjectKey,
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
DoNotTrack: req.DoNotTrack,
|
||||
Platform: "web",
|
||||
UserAgent: r.Header.Get("User-Agent"),
|
||||
UserOS: ua.OS,
|
||||
UserOSVersion: ua.OSVersion,
|
||||
UserBrowser: ua.Browser,
|
||||
UserBrowserVersion: ua.BrowserVersion,
|
||||
UserDevice: ua.Device,
|
||||
UserDeviceType: ua.DeviceType,
|
||||
UserCountry: country,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("Unable to insert Unstarted Session: %v\n", err);
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
|
||||
145
backend/services/http/handlers_ios.go
Normal file
145
backend/services/http/handlers_ios.go
Normal file
|
|
@ -0,0 +1,145 @@
|
|||
package main
|
||||
|
||||
// const FILES_SIZE_LIMIT int64 = 1e8 // 100Mb
|
||||
|
||||
// func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
// type request struct {
|
||||
// // SessionID *string
|
||||
// EncodedProjectID *uint64 `json:"projectID"`
|
||||
// TrackerVersion string `json:"trackerVersion"`
|
||||
// RevID string `json:"revID"`
|
||||
// UserUUID *string `json:"userUUID"`
|
||||
// //UserOS string `json"userOS"` //hardcoded 'MacOS'
|
||||
// UserOSVersion string `json:"userOSVersion"`
|
||||
// UserDevice string `json:"userDevice"`
|
||||
// Timestamp uint64 `json:"timestamp"`
|
||||
// // UserDeviceType uint 0:phone 1:pad 2:tv 3:carPlay 5:mac
|
||||
// // “performances”:{
|
||||
// // “activeProcessorCount”:8,
|
||||
// // “isLowPowerModeEnabled”:0,
|
||||
// // “orientation”:0,
|
||||
// // “systemUptime”:585430,
|
||||
// // “batteryState”:0,
|
||||
// // “thermalState”:0,
|
||||
// // “batteryLevel”:0,
|
||||
// // “processorCount”:8,
|
||||
// // “physicalMemory”:17179869184
|
||||
// // },
|
||||
// }
|
||||
// type response struct {
|
||||
// Token string `json:"token"`
|
||||
// ImagesHashList []string `json:"imagesHashList"`
|
||||
// UserUUID string `json:"userUUID"`
|
||||
// SESSION_ID uint64 `json:"SESSION_ID"` ///TEMP
|
||||
// }
|
||||
// startTime := time.Now()
|
||||
// req := &request{}
|
||||
// body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT)
|
||||
// //defer body.Close()
|
||||
// if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
// responseWithError(w, http.StatusBadRequest, err)
|
||||
// return
|
||||
// }
|
||||
|
||||
// if req.EncodedProjectID == nil {
|
||||
// responseWithError(w, http.StatusForbidden, errors.New("ProjectID value required"))
|
||||
// return
|
||||
// }
|
||||
// projectID := decodeProjectID(*(req.EncodedProjectID))
|
||||
// if projectID == 0 {
|
||||
// responseWithError(w, http.StatusUnprocessableEntity, errors.New("ProjectID value is invalid"))
|
||||
// return
|
||||
// }
|
||||
// p, err := pgconn.GetProject(uint32(projectID))
|
||||
// if p == nil {
|
||||
// if err == nil {
|
||||
// responseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"))
|
||||
// } else {
|
||||
// responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
// }
|
||||
// return
|
||||
// }
|
||||
// sessionID, err := flaker.Compose(req.Timestamp)
|
||||
// if err != nil {
|
||||
// responseWithError(w, http.StatusInternalServerError, err)
|
||||
// return
|
||||
// }
|
||||
// userUUID := getUUID(req.UserUUID)
|
||||
// country := geoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
// expirationTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
|
||||
// imagesHashList, err := s3.GetFrequentlyUsedKeys(*(req.EncodedProjectID)) // TODO: reuse index: ~ frequency * size
|
||||
// if err != nil {
|
||||
// responseWithError(w, http.StatusInternalServerError, err)
|
||||
// return
|
||||
// }
|
||||
|
||||
// responseWithJSON(w, &response{
|
||||
// Token: tokenizer.Compose(sessionID, uint64(expirationTime.UnixNano()/1e6)),
|
||||
// ImagesHashList: imagesHashList,
|
||||
// UserUUID: userUUID,
|
||||
// //TEMP:
|
||||
// SESSION_ID: sessionID,
|
||||
// })
|
||||
// producer.Produce(topicRaw, sessionID, messages.Encode(&messages.IOSSessionStart{
|
||||
// Timestamp: req.Timestamp,
|
||||
// ProjectID: projectID,
|
||||
// TrackerVersion: req.TrackerVersion,
|
||||
// RevID: req.RevID,
|
||||
// UserUUID: userUUID,
|
||||
// UserOS: "MacOS",
|
||||
// UserOSVersion: req.UserOSVersion,
|
||||
// UserDevice: MapIOSDevice(req.UserDevice),
|
||||
// UserDeviceType: GetIOSDeviceType(req.UserDevice), // string `json:"userDeviceType"` // From UserDevice; ENUM ?
|
||||
// UserCountry: country,
|
||||
// }))
|
||||
// }
|
||||
|
||||
|
||||
// func pushLateMessagesHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// sessionData, err := tokenizer.ParseFromHTTPRequest(r)
|
||||
// if err != nil && err != token.EXPIRED {
|
||||
// responseWithError(w, http.StatusUnauthorized, err)
|
||||
// return
|
||||
// }
|
||||
// // Check timestamps here?
|
||||
// pushMessages(w, r, sessionData.ID)
|
||||
// }
|
||||
|
||||
|
||||
// func iosImagesUploadHandler(w http.ResponseWriter, r *http.Request) {
|
||||
// r.Body = http.MaxBytesReader(w, r.Body, FILES_SIZE_LIMIT)
|
||||
// // defer r.Body.Close()
|
||||
// err := r.ParseMultipartForm(1e5) // 100Kb
|
||||
// if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
|
||||
// responseWithError(w, http.StatusUnsupportedMediaType, err)
|
||||
// // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
|
||||
// } else if err != nil {
|
||||
// responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
// }
|
||||
|
||||
// if len(r.MultipartForm.Value["projectID"]) == 0 {
|
||||
// responseWithError(w, http.StatusBadRequest, errors.New("projectID parameter required")) // status for missing/wrong parameter?
|
||||
// return
|
||||
// }
|
||||
// // encodedProjectID, err := strconv.ParseUint(r.MultipartForm.Value["projectID"][0], 10, 64)
|
||||
// // projectID := decodeProjectID(encodedProjectID)
|
||||
// // if projectID == 0 || err != nil {
|
||||
// // responseWithError(w, http.StatusUnprocessableEntity, errors.New("projectID value is incorrect"))
|
||||
// // return
|
||||
// // }
|
||||
// prefix := r.MultipartForm.Value["projectID"][0] + "/" //strconv.FormatUint(uint64(projectID), 10) + "/"
|
||||
|
||||
// for _, fileHeaderList := range r.MultipartForm.File {
|
||||
// for _, fileHeader := range fileHeaderList {
|
||||
// file, err := fileHeader.Open()
|
||||
// if err != nil {
|
||||
// continue // TODO: send server error or accumulate successful files
|
||||
// }
|
||||
// key := prefix + fileHeader.Filename // TODO: Malicious image put: use jwt?
|
||||
// go s3.Upload(file, key, "image/png", false)
|
||||
// }
|
||||
// }
|
||||
|
||||
// w.WriteHeader(http.StatusOK)
|
||||
// }
|
||||
79
backend/services/http/ios_device.go
Normal file
79
backend/services/http/ios_device.go
Normal file
|
|
@ -0,0 +1,79 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// iosDeviceNameByIdentifier maps Apple hardware identifiers (as reported by
// the device) to their human-readable marketing names.
var iosDeviceNameByIdentifier = map[string]string{
	"iPod5,1":           "iPod touch (5th generation)",
	"iPod7,1":           "iPod touch (6th generation)",
	"iPod9,1":           "iPod touch (7th generation)",
	"iPhone3,1":         "iPhone 4",
	"iPhone3,2":         "iPhone 4",
	"iPhone3,3":         "iPhone 4",
	"iPhone4,1":         "iPhone 4s",
	"iPhone5,1":         "iPhone 5",
	"iPhone5,2":         "iPhone 5",
	"iPhone5,3":         "iPhone 5c",
	"iPhone5,4":         "iPhone 5c",
	"iPhone6,1":         "iPhone 5s",
	"iPhone6,2":         "iPhone 5s",
	"iPhone7,2":         "iPhone 6",
	"iPhone7,1":         "iPhone 6 Plus",
	"iPhone8,1":         "iPhone 6s",
	"iPhone8,2":         "iPhone 6s Plus",
	"iPhone8,4":         "iPhone SE",
	"iPhone9,1":         "iPhone 7",
	"iPhone9,3":         "iPhone 7",
	"iPhone9,2":         "iPhone 7 Plus",
	"iPhone9,4":         "iPhone 7 Plus",
	"iPhone10,1":        "iPhone 8",
	"iPhone10,4":        "iPhone 8",
	"iPhone10,2":        "iPhone 8 Plus",
	"iPhone10,5":        "iPhone 8 Plus",
	"iPhone10,3":        "iPhone X",
	"iPhone10,6":        "iPhone X",
	"iPhone11,2":        "iPhone XS",
	"iPhone11,4":        "iPhone XS Max",
	"iPhone11,6":        "iPhone XS Max",
	"iPhone11,8":        "iPhone XR",
	"iPhone12,1":        "iPhone 11",
	"iPhone12,3":        "iPhone 11 Pro",
	"iPhone12,5":        "iPhone 11 Pro Max",
	"iPhone12,8":        "iPhone SE (2nd generation)",
	"iPhone13,1":        "iPhone 12 mini",
	"iPhone13,2":        "iPhone 12",
	"iPhone13,3":        "iPhone 12 Pro",
	"iPhone13,4":        "iPhone 12 Pro Max",
	"iPad2,1":           "iPad 2",
	"iPad2,2":           "iPad 2",
	"iPad2,3":           "iPad 2",
	"iPad2,4":           "iPad 2",
	"iPad3,1":           "iPad (3rd generation)",
	"iPad3,2":           "iPad (3rd generation)",
	"iPad3,3":           "iPad (3rd generation)",
	"iPad3,4":           "iPad (4th generation)",
	"iPad3,5":           "iPad (4th generation)",
	"iPad3,6":           "iPad (4th generation)",
	"iPad6,11":          "iPad (5th generation)",
	"iPad6,12":          "iPad (5th generation)",
	"iPad7,5":           "iPad (6th generation)",
	"iPad7,6":           "iPad (6th generation)",
	"iPad7,11":          "iPad (7th generation)",
	"iPad7,12":          "iPad (7th generation)",
	"iPad11,6":          "iPad (8th generation)",
	"iPad11,7":          "iPad (8th generation)",
	"iPad4,1":           "iPad Air",
	"iPad4,2":           "iPad Air",
	"iPad4,3":           "iPad Air",
	"iPad5,3":           "iPad Air 2",
	"iPad5,4":           "iPad Air 2",
	"iPad11,3":          "iPad Air (3rd generation)",
	"iPad11,4":          "iPad Air (3rd generation)",
	"iPad13,1":          "iPad Air (4th generation)",
	"iPad13,2":          "iPad Air (4th generation)",
	"iPad2,5":           "iPad mini",
	"iPad2,6":           "iPad mini",
	"iPad2,7":           "iPad mini",
	"iPad4,4":           "iPad mini 2",
	"iPad4,5":           "iPad mini 2",
	"iPad4,6":           "iPad mini 2",
	"iPad4,7":           "iPad mini 3",
	"iPad4,8":           "iPad mini 3",
	"iPad4,9":           "iPad mini 3",
	"iPad5,1":           "iPad mini 4",
	"iPad5,2":           "iPad mini 4",
	"iPad11,1":          "iPad mini (5th generation)",
	"iPad11,2":          "iPad mini (5th generation)",
	"iPad6,3":           "iPad Pro (9.7-inch)",
	"iPad6,4":           "iPad Pro (9.7-inch)",
	"iPad7,3":           "iPad Pro (10.5-inch)",
	"iPad7,4":           "iPad Pro (10.5-inch)",
	"iPad8,1":           "iPad Pro (11-inch) (1st generation)",
	"iPad8,2":           "iPad Pro (11-inch) (1st generation)",
	"iPad8,3":           "iPad Pro (11-inch) (1st generation)",
	"iPad8,4":           "iPad Pro (11-inch) (1st generation)",
	"iPad8,9":           "iPad Pro (11-inch) (2nd generation)",
	"iPad8,10":          "iPad Pro (11-inch) (2nd generation)",
	"iPad6,7":           "iPad Pro (12.9-inch) (1st generation)",
	"iPad6,8":           "iPad Pro (12.9-inch) (1st generation)",
	"iPad7,1":           "iPad Pro (12.9-inch) (2nd generation)",
	"iPad7,2":           "iPad Pro (12.9-inch) (2nd generation)",
	"iPad8,5":           "iPad Pro (12.9-inch) (3rd generation)",
	"iPad8,6":           "iPad Pro (12.9-inch) (3rd generation)",
	"iPad8,7":           "iPad Pro (12.9-inch) (3rd generation)",
	"iPad8,8":           "iPad Pro (12.9-inch) (3rd generation)",
	"iPad8,11":          "iPad Pro (12.9-inch) (4th generation)",
	"iPad8,12":          "iPad Pro (12.9-inch) (4th generation)",
	"AppleTV5,3":        "Apple TV",
	"AppleTV6,2":        "Apple TV 4K",
	"AudioAccessory1,1": "HomePod",
	"AudioAccessory5,1": "HomePod mini",
	"i386":              "Simulator",
	"x86_64":            "Simulator",
}

// MapIOSDevice translates an Apple hardware identifier into its marketing
// name; unknown identifiers are returned unchanged.
func MapIOSDevice(identifier string) string {
	if name, known := iosDeviceNameByIdentifier[identifier]; known {
		return name
	}
	return identifier
}
|
||||
|
||||
// GetIOSDeviceType derives a coarse device category ("mobile", "tablet" or
// "other") from an Apple hardware identifier. iPhone takes precedence over
// iPad when both substrings are present.
func GetIOSDeviceType(identifier string) string {
	switch {
	case strings.Contains(identifier, "iPhone"):
		return "mobile" //"phone"
	case strings.Contains(identifier, "iPad"):
		return "tablet"
	default:
		return "other"
	}
}
|
||||
137
backend/services/http/main.go
Normal file
137
backend/services/http/main.go
Normal file
|
|
@ -0,0 +1,137 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/flakeid"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"openreplay/backend/pkg/token"
|
||||
"openreplay/backend/services/http/geoip"
|
||||
"openreplay/backend/services/http/uaparser"
|
||||
|
||||
)
|
||||
|
||||
// Package-level singletons shared by all HTTP handlers; initialized once
// in main() before the server starts accepting requests.
var rewriter *assets.Rewriter   // rewrites asset URLs to the assets origin
var producer types.Producer     // queue producer for the topics below
var pgconn *cache.PGCache       // cached Postgres access (projects, sessions)
var flaker *flakeid.Flaker      // session-ID generator
var uaParser *uaparser.UAParser // User-Agent parser
var geoIP *geoip.GeoIP          // IP-to-country resolver
var tokenizer *token.Tokenizer  // session token compose/parse
var s3 *storage.S3              // iOS images bucket (init currently commented out in main)
var topicRaw string             // topic for raw tracker batches
var topicTrigger string         // topic for trigger/asset-cache messages
var topicAnalytics string       // topic for analytics messages (currently unused here)
// var kafkaTopicEvents string
||||
|
||||
// main wires up the HTTP ingestion service: it initializes the shared
// package-level singletons, registers the tracker endpoints on a single
// handler, and serves until SIGINT/SIGTERM triggers a graceful shutdown.
func main() {
	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

	producer = queue.NewProducer()
	defer producer.Close(15000)
	topicRaw = env.String("TOPIC_RAW")
	topicTrigger = env.String("TOPIC_TRIGGER")
	topicAnalytics = env.String("TOPIC_ANALYTICS")
	rewriter = assets.NewRewriter(env.String("ASSETS_ORIGIN"))
	// 1000 * 60 * 20 = 20 minutes — presumably the cache lifetime in ms;
	// confirm against cache.NewPGCache.
	pgconn = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000 * 60 * 20)
	defer pgconn.Close()
	//s3 = storage.NewS3(env.String("S3_BUCKET_IMAGES_IOS"), env.String("AWS_REGION"))
	tokenizer = token.NewTokenizer(env.String("TOKEN_SECRET"))
	uaParser = uaparser.NewUAParser(env.String("UAPARSER_FILE"))
	geoIP = geoip.NewGeoIP(env.String("MAXMINDDB_FILE"))
	flaker = flakeid.NewFlaker(env.WorkerID())
	HTTP_PORT := env.String("HTTP_PORT")

	server := &http.Server{
		Addr: ":" + HTTP_PORT,
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

			// Permissive CORS so the tracker can POST from any origin.
			// TODO: agree with specification
			w.Header().Set("Access-Control-Allow-Origin", "*")
			w.Header().Set("Access-Control-Allow-Methods", "POST")
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization")
			if r.Method == http.MethodOptions {
				w.WriteHeader(http.StatusOK)
				return
			}
			// Manual routing: every endpoint accepts POST only; "/" is a
			// health check.
			switch r.URL.Path {
			case "/":
				w.WriteHeader(http.StatusOK)
			case "/v1/web/not-started":
				switch r.Method {
				case "POST":
					notStartedHandler(w, r)
				default:
					w.WriteHeader(http.StatusMethodNotAllowed)
				}
			case "/v1/web/start":
				switch r.Method {
				case "POST":
					startSessionHandlerWeb(w, r)
				default:
					w.WriteHeader(http.StatusMethodNotAllowed)
				}
			case "/v1/web/i":
				switch r.Method {
				case "POST":
					pushMessagesSeparatelyHandler(w, r)
				default:
					w.WriteHeader(http.StatusMethodNotAllowed)
				}
			// iOS endpoints are not enabled yet:
			// case "/v1/ios/start":
			// 	switch r.Method {
			// 	case "POST":
			// 		startSessionHandlerIOS(w, r)
			// 	default:
			// 		w.WriteHeader(http.StatusMethodNotAllowed)
			// 	}
			// case "/v1/ios/append":
			// 	switch r.Method {
			// 	case "POST":
			// 		pushMessagesHandler(w, r)
			// 	default:
			// 		w.WriteHeader(http.StatusMethodNotAllowed)
			// 	}
			// case "/v1/ios/late":
			// 	switch r.Method {
			// 	case "POST":
			// 		pushLateMessagesHandler(w, r)
			// 	default:
			// 		w.WriteHeader(http.StatusMethodNotAllowed)
			// 	}
			// case "/v1/ios/images":
			// 	switch r.Method {
			// 	case "POST":
			// 		iosImagesUploadHandler(w, r)
			// 	default:
			// 		w.WriteHeader(http.StatusMethodNotAllowed)
			// 	}
			default:
				w.WriteHeader(http.StatusNotFound)
			}
		}),
	}
	go func() {
		if err := server.ListenAndServe(); err != nil {
			log.Fatalf("Server error: %v\n", err)
		}
	}()
	log.Printf("Server successfully started on port %v\n", HTTP_PORT)
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
	<-sigchan
	log.Printf("Shutting down the server\n")
	server.Shutdown(context.Background())
}
|
||||
12
backend/services/http/project_id.go
Normal file
12
backend/services/http/project_id.go
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
package main
|
||||
|
||||
// decodeProjectID reverses the public project-ID obfuscation: encoded IDs
// live in [0x10000000000000, 0x20000000000000); the offset is multiplied by
// a fixed odd constant modulo 2^52 and must fit in 32 bits. Any value that
// fails validation decodes to 0.
func decodeProjectID(projectID uint64) uint64 {
	const (
		rangeLow  = uint64(0x10000000000000) // inclusive lower bound of encoded range
		rangeHigh = uint64(0x20000000000000) // exclusive upper bound
		mult      = uint64(4212451012670231) // odd ⇒ invertible mod 2^52
		mask52    = uint64(0xfffffffffffff)  // keep the low 52 bits
		maxID     = uint64(0xffffffff)       // decoded IDs are 32-bit
	)
	if projectID < rangeLow || projectID >= rangeHigh {
		return 0
	}
	decoded := (projectID - rangeLow) * mult & mask52
	if decoded > maxID {
		return 0
	}
	return decoded
}
|
||||
23
backend/services/http/response.go
Normal file
23
backend/services/http/response.go
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// responseWithJSON serializes res as JSON and writes it as the response
// body with the implicit 200 status. A marshalling failure is logged and
// results in an empty body (the status is still 200).
func responseWithJSON(w http.ResponseWriter, res interface{}) {
	payload, marshalErr := json.Marshal(res)
	if marshalErr != nil {
		// Best-effort: log and fall through; w.Write(nil) writes nothing.
		log.Println(marshalErr)
	}
	w.Write(payload)
}
|
||||
|
||||
func responseWithError(w http.ResponseWriter, code int, err error) {
|
||||
type response struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
w.WriteHeader(code)
|
||||
responseWithJSON(w, &response{err.Error()})
|
||||
}
|
||||
8
backend/services/http/uaparser/http.go
Normal file
8
backend/services/http/uaparser/http.go
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
package uaparser
|
||||
|
||||
import "net/http"
|
||||
|
||||
func (parser *UAParser) ParseFromHTTPRequest(r *http.Request) *UA {
|
||||
str := r.Header.Get("User-Agent")
|
||||
return parser.Parse(str)
|
||||
}
|
||||
77
backend/services/http/uaparser/uaparser.go
Normal file
77
backend/services/http/uaparser/uaparser.go
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
package uaparser
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/ua-parser/uap-go/uaparser"
|
||||
)
|
||||
|
||||
// UAParser wraps a ua-parser instance to produce normalized UA descriptions.
type UAParser struct {
	p *uaparser.Parser // underlying parser built from a uap-core regexes.yaml file
}
|
||||
|
||||
func NewUAParser(regexFile string) *UAParser {
|
||||
p, err := uaparser.New(regexFile)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
return &UAParser{p}
|
||||
}
|
||||
|
||||
// UA is a normalized user-agent description produced by UAParser.Parse.
type UA struct {
	OS string // operating-system family, e.g. "Windows"
	OSVersion string // dotted OS version built from major[.minor[.patch]]
	Browser string // browser family with any "/"-suffix stripped
	BrowserVersion string // dotted browser version built from major[.minor[.patch]]
	Device string // device family; emptied for "Other" and "Mac"
	DeviceType string // "desktop", "mobile" or "other", derived from OS
}
|
||||
|
||||
func (parser *UAParser) Parse(str string) *UA {
|
||||
if str == "" {
|
||||
return nil
|
||||
}
|
||||
data := parser.p.Parse(str)
|
||||
if data == nil {
|
||||
return nil
|
||||
}
|
||||
ua := &UA{
|
||||
OS: data.Os.Family,
|
||||
Browser: strings.Split(data.UserAgent.Family, "/")[0],
|
||||
Device: data.Device.Family,
|
||||
}
|
||||
if ua.OS == "" || ua.Browser == "" || ua.Device == "Spider" {
|
||||
return nil
|
||||
}
|
||||
if ua.Device == "Other" || ua.Device == "Mac" {
|
||||
ua.Device = ""
|
||||
}
|
||||
if data.Os.Major != "" {
|
||||
ua.OSVersion += data.Os.Major
|
||||
if data.Os.Minor != "" {
|
||||
ua.OSVersion += "." + data.Os.Minor
|
||||
if data.Os.Patch != "" {
|
||||
ua.OSVersion += "." + data.Os.Patch
|
||||
}
|
||||
}
|
||||
}
|
||||
if data.UserAgent.Major != "" {
|
||||
ua.BrowserVersion += data.UserAgent.Major
|
||||
if data.UserAgent.Minor != "" {
|
||||
ua.BrowserVersion += "." + data.UserAgent.Minor
|
||||
if data.UserAgent.Patch != "" {
|
||||
ua.BrowserVersion += "." + data.UserAgent.Patch
|
||||
}
|
||||
}
|
||||
}
|
||||
switch ua.OS {
|
||||
case "Chrome OS", "Fedora", "FreeBSD", "Linux", "Mac OS X", "NetBSD", "Ubuntu", "Windows":
|
||||
ua.DeviceType = "desktop"
|
||||
case "Android", "BlackBerry OS", "BlackBerry Tablet OS", "iOS", "Windows Phone":
|
||||
ua.DeviceType = "mobile"
|
||||
default:
|
||||
ua.DeviceType = "other"
|
||||
}
|
||||
return ua
|
||||
}
|
||||
15
backend/services/http/uuid.go
Normal file
15
backend/services/http/uuid.go
Normal file
|
|
@ -0,0 +1,15 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
func getUUID(u *string) string {
|
||||
if u != nil {
|
||||
_, err := uuid.Parse(*u)
|
||||
if err == nil {
|
||||
return *u
|
||||
}
|
||||
}
|
||||
return uuid.New().String()
|
||||
}
|
||||
51
backend/services/integrations/clientManager/manager.go
Normal file
51
backend/services/integrations/clientManager/manager.go
Normal file
|
|
@ -0,0 +1,51 @@
|
|||
package clientManager
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/services/integrations/integration"
|
||||
)
|
||||
|
||||
|
||||
// manager keeps one integration client per (project, provider) pair and
// fans their output into shared channels.
type manager struct {
	clientMap integration.ClientMap // active clients, keyed by projectID+provider
	Events chan *integration.SessionErrorEvent // error events fetched from providers
	Errors chan error // operational errors reported by clients
	RequestDataUpdates chan postgres.Integration // not pointer because it could change in other thread
}
|
||||
|
||||
|
||||
func NewManager() *manager {
|
||||
return &manager {
|
||||
clientMap: make(integration.ClientMap),
|
||||
RequestDataUpdates: make(chan postgres.Integration, 100),
|
||||
Events: make(chan *integration.SessionErrorEvent, 100),
|
||||
Errors: make(chan error, 100),
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (m* manager) Update(i *postgres.Integration) error {
|
||||
key := strconv.Itoa(int(i.ProjectID)) + i.Provider
|
||||
if i.Options == nil {
|
||||
delete(m.clientMap, key)
|
||||
return nil
|
||||
}
|
||||
c, exists := m.clientMap[ key ]
|
||||
if !exists {
|
||||
c, err := integration.NewClient(i, m.RequestDataUpdates, m.Events, m.Errors)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m.clientMap[ key ] = c
|
||||
return nil
|
||||
}
|
||||
return c.Update(i)
|
||||
}
|
||||
|
||||
func (m *manager) RequestAll() {
|
||||
for _, c := range m.clientMap {
|
||||
go c.Request()
|
||||
}
|
||||
}
|
||||
123
backend/services/integrations/integration/bugsnag.go
Normal file
123
backend/services/integrations/integration/bugsnag.go
Normal file
|
|
@ -0,0 +1,123 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"time"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
- Bugsnag messages are usually received later than those of other providers
*/
|
||||
|
||||
// bugsnag fetches error events from the Bugsnag Data Access API for one
// project. Its fields are populated by json.Unmarshal from the Options
// payload of the integration record (see client.Update).
type bugsnag struct {
	BugsnagProjectId string // `json:"bugsnag_project_id"`
	AuthorizationToken string // `json:"auth_token"`
}
|
||||
|
||||
|
||||
// bugsnagEvent is the subset of a Bugsnag event payload used here: the
// session correlation info attached by the tracker, the receive time, and
// the first exception message.
type bugsnagEvent struct {
	MetaData struct {
		SpecialInfo struct {
			AsayerSessionId uint64 `json:"asayerSessionId,string"`
			OpenReplaySessionToken string `json:"openReplaySessionToken"`
		} `json:"special_info"`
	} `json:"metaData"`
	ReceivedAt string `json:"received_at"` // can use time.Time as it implements UnmarshalJSON from RFC3339 format
	// Only the first exception's message is used as the error name.
	Exceptions [1]struct {
		Message string
	}
}
|
||||
|
||||
func (b *bugsnag) Request(c *client) error {
|
||||
sinceTs := c.getLastMessageTimestamp() + 1000 // From next second
|
||||
sinceFormatted := time.Unix(0, int64(sinceTs*1e6)).Format(time.RFC3339)
|
||||
requestURL := fmt.Sprintf("https://api.bugsnag.com/projects/%v/events", b.BugsnagProjectId)
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
q := req.URL.Query()
|
||||
// q.Add("per_page", "100") // Up to a maximum of 30. Default: 30
|
||||
// q.Add("sort", "timestamp") // Default: timestamp (timestamp == ReceivedAt ??)
|
||||
q.Add("direction", "asc") // Default: desc
|
||||
q.Add("full_reports", "true") // Default: false
|
||||
q.Add("filters[event.since][][type]", "eq")
|
||||
q.Add("filters[event.since][][value]", sinceFormatted) // seems like inclusively
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
authToken := "token " + b.AuthorizationToken
|
||||
req.Header.Add("Authorization", authToken)
|
||||
req.Header.Add("X-Version", "2")
|
||||
|
||||
for {
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Status code
|
||||
// 401 (unauthorised)
|
||||
if resp.StatusCode >= 400 {
|
||||
io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket
|
||||
return fmt.Errorf("Bugsnag: server respond with the code %v | data: %v ", resp.StatusCode, *b)
|
||||
}
|
||||
|
||||
var jsonEventList []json.RawMessage
|
||||
err = json.NewDecoder(resp.Body).Decode(&jsonEventList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, jsonEvent := range jsonEventList {
|
||||
var e bugsnagEvent
|
||||
err = json.Unmarshal(jsonEvent, &e)
|
||||
if err != nil {
|
||||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
sessionID := e.MetaData.SpecialInfo.AsayerSessionId
|
||||
token := e.MetaData.SpecialInfo.OpenReplaySessionToken
|
||||
if sessionID == 0 && token == "" {
|
||||
// c.errChan <- "No AsayerSessionId found. | Message: %v", e
|
||||
continue
|
||||
}
|
||||
parsedTime, err := time.Parse(time.RFC3339, e.ReceivedAt)
|
||||
if err != nil {
|
||||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(parsedTime))
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
SessionID: sessionID,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "bugsnag",
|
||||
Timestamp: timestamp,
|
||||
Name: e.Exceptions[0].Message,
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
linkHeader := resp.Header.Get("Link")
|
||||
if linkHeader == "" {
|
||||
break
|
||||
}
|
||||
|
||||
nextLink := GetLinkFromAngularBrackets(linkHeader)
|
||||
req.URL, err = url.Parse(nextLink)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
9207
backend/services/integrations/integration/bugsnag.json
Normal file
9207
backend/services/integrations/integration/bugsnag.json
Normal file
File diff suppressed because it is too large
Load diff
153
backend/services/integrations/integration/client.go
Normal file
153
backend/services/integrations/integration/client.go
Normal file
|
|
@ -0,0 +1,153 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"fmt"
|
||||
"encoding/json"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/utime"
|
||||
)
|
||||
|
||||
// Retry/backoff policy for provider requests (see client.Request).
const MAX_ATTEMPTS_IN_A_ROW = 4 // consecutive failures before the backoff interval applies
const MAX_ATTEMPTS = 40 // consecutive failures after which the client stops requesting
const ATTEMPTS_INTERVAL = 3 * 60 * 60 * 1000 // backoff interval in milliseconds (3 hours)
||||
|
||||
// requester is implemented by each provider adapter (bugsnag, sentry, ...).
// Request fetches fresh error events and feeds them into the given client's
// channels; it returns an error when the provider could not be queried.
type requester interface {
	Request(*client) error
}
|
||||
|
||||
// requestData is the per-integration fetch cursor, persisted as JSON in
// postgres.Integration.RequestData. Field names are part of the stored
// format — do not rename them (including the "Unsuccessfull" misspelling).
type requestData struct {
	LastMessageTimestamp uint64 // `json:"lastMessageTimestamp, string"`
	LastMessageId string // ID of the last processed message, for providers that page by ID
	UnsuccessfullAttemptsCount int // failed request attempts; reset to 0 on success
	LastAttemptTimestamp int64 // time of the last attempt, in milliseconds
}
|
||||
|
||||
// client polls a single third-party integration for error events and keeps
// its fetch cursor (requestData) up to date.
type client struct {
	requestData // current fetch cursor, published through updateChan after each attempt
	requester // provider-specific fetch implementation, swapped by Update
	integration *postgres.Integration // latest integration record
	// TODO: timeout ?
	mux sync.Mutex // serializes Update and Request
	updateChan chan<- postgres.Integration // cursor updates, for persistence
	evChan chan<- *SessionErrorEvent // decoded error events
	errChan chan<- error // operational errors
}
|
||||
|
||||
// SessionErrorEvent is an error event tied to a recorded session, addressed
// either by numeric SessionID or by the session Token (either may be unset,
// but not both — see bugsnag.Request's filtering).
type SessionErrorEvent struct {
	SessionID uint64
	Token string
	*messages.RawErrorEvent
}
|
||||
|
||||
// ClientMap indexes active integration clients by a projectID+provider key.
type ClientMap map[string]*client
|
||||
|
||||
func NewClient(i *postgres.Integration, updateChan chan<- postgres.Integration, evChan chan<- *SessionErrorEvent, errChan chan<- error) (*client, error) {
|
||||
c := new(client)
|
||||
if err := c.Update(i); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(i.RequestData, &c.requestData); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.evChan = evChan
|
||||
c.errChan = errChan
|
||||
c.updateChan = updateChan
|
||||
// TODO: RequestData manager
|
||||
if c.requestData.LastMessageTimestamp == 0 {
|
||||
// ?
|
||||
c.requestData.LastMessageTimestamp = uint64(utime.CurrentTimestamp() - 24*60*60*1000)
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
|
||||
// from outside
|
||||
func (c* client) Update(i *postgres.Integration) error {
|
||||
c.mux.Lock()
|
||||
defer c.mux.Unlock()
|
||||
var r requester
|
||||
switch i.Provider {
|
||||
case "bugsnag":
|
||||
r = new(bugsnag)
|
||||
case "cloudwatch":
|
||||
r = new(cloudwatch)
|
||||
case "datadog":
|
||||
r = new(datadog)
|
||||
case "elasticsearch":
|
||||
r = new(elasticsearch)
|
||||
case "newrelic":
|
||||
r = new(newrelic)
|
||||
case "rollbar":
|
||||
r = new(rollbar)
|
||||
case "sentry":
|
||||
r = new(sentry)
|
||||
case "stackdriver":
|
||||
r = new(stackdriver)
|
||||
case "sumologic":
|
||||
r = new(sumologic)
|
||||
}
|
||||
if err := json.Unmarshal(i.Options, r); err != nil {
|
||||
return err
|
||||
}
|
||||
c.integration = i
|
||||
c.requester = r
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cursor accessors — package scope, used by the provider adapters.

// setLastMessageTimestamp advances the cursor timestamp; it never moves it
// backwards.
func (c *client) setLastMessageTimestamp(timestamp uint64) {
	if timestamp > c.requestData.LastMessageTimestamp {
		c.requestData.LastMessageTimestamp = timestamp
	}
}

// getLastMessageTimestamp returns the timestamp of the newest message seen.
func (c *client) getLastMessageTimestamp() uint64 {
	return c.requestData.LastMessageTimestamp
}

// setLastMessageId stores both the last message ID and its timestamp,
// unconditionally (the timestamp-ordering guard is intentionally disabled).
func (c *client) setLastMessageId(timestamp uint64, id string) {
	//if timestamp >= c.requestData.LastMessageTimestamp {
	c.requestData.LastMessageId = id
	c.requestData.LastMessageTimestamp = timestamp
	//}
}

// getLastMessageId returns the ID of the last processed message.
func (c *client) getLastMessageId() string {
	return c.requestData.LastMessageId
}
||||
|
||||
// handleError forwards err to the shared error channel, annotated with the
// integration record it belongs to.
func (c *client) handleError(err error) {
	c.errChan <- fmt.Errorf("%v | Integration: %v", err, *c.integration)
}
|
||||
|
||||
// Thread-safe
|
||||
func (c *client) Request() {
|
||||
c.mux.Lock()
|
||||
defer c.mux.Unlock()
|
||||
if c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS ||
|
||||
(c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS_IN_A_ROW &&
|
||||
utime.CurrentTimestamp() - c.requestData.LastAttemptTimestamp < ATTEMPTS_INTERVAL) {
|
||||
return
|
||||
}
|
||||
|
||||
c.requestData.LastAttemptTimestamp = utime.CurrentTimestamp()
|
||||
err := c.requester.Request(c)
|
||||
if err != nil {
|
||||
c.handleError(err)
|
||||
c.requestData.UnsuccessfullAttemptsCount++;
|
||||
} else {
|
||||
c.requestData.UnsuccessfullAttemptsCount = 0
|
||||
}
|
||||
rd, err := json.Marshal(c.requestData)
|
||||
if err != nil {
|
||||
c.handleError(err)
|
||||
}
|
||||
// RequestData is a byte array (pointer-like type), but it's replacement
|
||||
// won't affect the previous value sent by channel
|
||||
c.integration.RequestData = rd
|
||||
c.updateChan <- *c.integration
|
||||
}
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue