Merge remote-tracking branch 'origin/dev' into api-v1.5.5

Commit cc692dfb29: 67 changed files with 946 additions and 1063 deletions
@@ -5,39 +5,38 @@ from chalicelib.core import users

def get_state(tenant_id):
    my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
    pids = [s["projectId"] for s in my_projects]
    pids = projects.get_projects_ids(tenant_id=tenant_id)
    with pg_client.PostgresClient() as cur:
        recorded = False
        meta = False

        if len(pids) > 0:
            cur.execute(
                cur.mogrify("""\
                    SELECT
                        COUNT(*)
                    FROM public.sessions AS s
                    where s.project_id IN %(ids)s
                    LIMIT 1;""",
                cur.mogrify("""SELECT EXISTS(( SELECT 1
                                               FROM public.sessions AS s
                                               WHERE s.project_id IN %(ids)s)) AS exists;""",
                            {"ids": tuple(pids)})
            )
            recorded = cur.fetchone()["count"] > 0
            recorded = cur.fetchone()["exists"]
        meta = False
        if recorded:
            cur.execute("""SELECT SUM((SELECT COUNT(t.meta)
                                       FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
                                                    (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
                                                    (sessions.user_id)) AS t(meta)
                                       WHERE t.meta NOTNULL))
                           FROM public.projects AS p
                                LEFT JOIN LATERAL ( SELECT 'defined'
                                                    FROM public.sessions
                                                    WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
                                                    LIMIT 1) AS sessions(user_id) ON(TRUE)
                           WHERE p.deleted_at ISNULL;"""
                        )
            cur.execute("""SELECT EXISTS((SELECT 1
                                          FROM public.projects AS p
                                               LEFT JOIN LATERAL ( SELECT 1
                                                                   FROM public.sessions
                                                                   WHERE sessions.project_id = p.project_id
                                                                     AND sessions.user_id IS NOT NULL
                                                                   LIMIT 1) AS sessions(user_id) ON (TRUE)
                                          WHERE p.deleted_at ISNULL
                                            AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                                                  OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                                                  OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                                                  OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                                                  OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                                                  OR p.metadata_10 IS NOT NULL )
                                         )) AS exists;""")

            meta = cur.fetchone()["sum"] > 0
            meta = cur.fetchone()["exists"]

    return [
        {"task": "Install OpenReplay",

@@ -58,22 +57,18 @@ def get_state(tenant_id):

def get_state_installing(tenant_id):
    my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
    pids = [s["projectId"] for s in my_projects]
    pids = projects.get_projects_ids(tenant_id=tenant_id)
    with pg_client.PostgresClient() as cur:
        recorded = False

        if len(pids) > 0:
            cur.execute(
                cur.mogrify("""\
                    SELECT
                        COUNT(*)
                    FROM public.sessions AS s
                    where s.project_id IN %(ids)s
                    LIMIT 1;""",
                cur.mogrify("""SELECT EXISTS(( SELECT 1
                                               FROM public.sessions AS s
                                               WHERE s.project_id IN %(ids)s)) AS exists;""",
                            {"ids": tuple(pids)})
            )
            recorded = cur.fetchone()["count"] > 0
            recorded = cur.fetchone()["exists"]

    return {"task": "Install OpenReplay",
            "done": recorded,

@@ -82,20 +77,23 @@ def get_state_installing(tenant_id):

def get_state_identify_users(tenant_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            """SELECT SUM((SELECT COUNT(t.meta)
                           FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
                                        (p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
                                        (sessions.user_id)) AS t(meta)
                           WHERE t.meta NOTNULL))
               FROM public.projects AS p
                    LEFT JOIN LATERAL ( SELECT 'defined'
                                        FROM public.sessions
                                        WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
                                        LIMIT 1) AS sessions(user_id) ON(TRUE)
               WHERE p.deleted_at ISNULL;""")
        cur.execute("""SELECT EXISTS((SELECT 1
                                      FROM public.projects AS p
                                           LEFT JOIN LATERAL ( SELECT 1
                                                               FROM public.sessions
                                                               WHERE sessions.project_id = p.project_id
                                                                 AND sessions.user_id IS NOT NULL
                                                               LIMIT 1) AS sessions(user_id) ON (TRUE)
                                      WHERE p.deleted_at ISNULL
                                        AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
                                              OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
                                              OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
                                              OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
                                              OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                                              OR p.metadata_10 IS NOT NULL )
                                     )) AS exists;""")

        meta = cur.fetchone()["sum"] > 0
        meta = cur.fetchone()["exists"]

    return {"task": "Identify Users",
            "done": meta,

@@ -281,3 +281,13 @@ def update_capture_status(project_id, changes):
    )

    return changes


def get_projects_ids(tenant_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(f"""SELECT s.project_id
                        FROM public.projects AS s
                        WHERE s.deleted_at IS NULL
                        ORDER BY s.project_id;""")
        rows = cur.fetchall()
    return [r["project_id"] for r in rows]
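The recurring change in the hunks above is the same each time: a `COUNT(*)`-plus-comparison is replaced by a single `SELECT EXISTS(...)`, which lets Postgres stop scanning at the first matching row instead of counting them all. A minimal Go sketch of the same pattern (illustrative only, not part of this commit; it assumes a database/sql connection, and uses `= ANY($1)` with `pq.Array` in place of the Python-side `IN %(ids)s` tuple binding; the helper name is hypothetical):

import (
    "database/sql"

    "github.com/lib/pq" // Postgres driver, already listed in go.mod (v1.2.0)
)

// hasRecordedSessions mirrors the new EXISTS query: it returns true as soon
// as one session exists for any of the given projects.
func hasRecordedSessions(db *sql.DB, projectIDs []int64) (bool, error) {
    var exists bool
    err := db.QueryRow(
        `SELECT EXISTS(SELECT 1
                       FROM public.sessions AS s
                       WHERE s.project_id = ANY($1)) AS exists`,
        pq.Array(projectIDs),
    ).Scan(&exists)
    return exists, err
}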
@@ -30,7 +30,7 @@ def compute():
                           RETURNING *,(SELECT email FROM public.users WHERE role='owner' LIMIT 1);"""
            )
        data = cur.fetchone()
    requests.post('https://parrot.asayer.io/os/telemetry', json={"stats": [process_data(data)]})
    requests.post('https://api.openreplay.com/os/telemetry', json={"stats": [process_data(data)]})


def new_client():

@@ -40,4 +40,4 @@ def new_client():
                           (SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
                       FROM public.tenants;""")
        data = cur.fetchone()
    requests.post('https://parrot.asayer.io/os/signup', json=process_data(data))
    requests.post('https://api.openreplay.com/os/signup', json=process_data(data))
@@ -1,4 +1,4 @@
FROM golang:1.13-alpine3.10 AS prepare
FROM golang:1.18-alpine3.15 AS prepare

RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash
@@ -1,14 +1,12 @@
module openreplay/backend

go 1.13
go 1.18

require (
    cloud.google.com/go/logging v1.4.2
    github.com/ClickHouse/clickhouse-go v1.4.3
    github.com/Masterminds/squirrel v1.5.0
    github.com/aws/aws-sdk-go v1.35.23
    github.com/btcsuite/btcutil v1.0.2
    github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect
    github.com/elastic/go-elasticsearch/v7 v7.13.1
    github.com/go-redis/redis v6.15.9+incompatible
    github.com/google/uuid v1.1.2

@@ -16,14 +14,47 @@ require (
    github.com/jackc/pgconn v1.6.0
    github.com/jackc/pgerrcode v0.0.0-20201024163028-a0d42d470451
    github.com/jackc/pgx/v4 v4.6.0
    github.com/klauspost/compress v1.11.9 // indirect
    github.com/klauspost/pgzip v1.2.5
    github.com/lib/pq v1.2.0
    github.com/oschwald/maxminddb-golang v1.7.0
    github.com/pkg/errors v0.9.1
    github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
    github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
    golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420
    google.golang.org/api v0.50.0
    gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0

)

require (
    cloud.google.com/go v0.84.0 // indirect
    github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
    github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect
    github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/go-cmp v0.5.6 // indirect
    github.com/googleapis/gax-go/v2 v2.0.5 // indirect
    github.com/jackc/chunkreader/v2 v2.0.1 // indirect
    github.com/jackc/pgio v1.0.0 // indirect
    github.com/jackc/pgpassfile v1.0.0 // indirect
    github.com/jackc/pgproto3/v2 v2.0.2 // indirect
    github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8 // indirect
    github.com/jackc/pgtype v1.3.0 // indirect
    github.com/jackc/puddle v1.1.0 // indirect
    github.com/jmespath/go-jmespath v0.4.0 // indirect
    github.com/jstemmer/go-junit-report v0.9.1 // indirect
    github.com/klauspost/compress v1.11.9 // indirect
    go.opencensus.io v0.23.0 // indirect
    golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
    golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
    golang.org/x/mod v0.4.2 // indirect
    golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect
    golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
    golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
    golang.org/x/text v0.3.6 // indirect
    golang.org/x/tools v0.1.4 // indirect
    golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 // indirect
    google.golang.org/grpc v1.38.0 // indirect
    google.golang.org/protobuf v1.26.0 // indirect
    gopkg.in/yaml.v2 v2.2.8 // indirect
)
@@ -46,8 +46,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc=
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8=
github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/aws/aws-sdk-go v1.35.23 h1:SCP0d0XvyJTDmfnHEQPvBaYi3kea1VNUo7uQmkVgFts=
github.com/aws/aws-sdk-go v1.35.23/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=

@@ -75,8 +73,8 @@ github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnht
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/confluentinc/confluent-kafka-go v1.5.2 h1:l+qt+a0Okmq0Bdr1P55IX4fiwFJyg0lZQmfHkAFkv7E=
github.com/confluentinc/confluent-kafka-go v1.5.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
github.com/confluentinc/confluent-kafka-go v1.7.0 h1:tXh3LWb2Ne0WiU3ng4h5qiGA9XV61rz46w60O+cq8bM=
github.com/confluentinc/confluent-kafka-go v1.7.0/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=

@@ -93,7 +91,6 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=

@@ -135,7 +132,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=

@@ -152,11 +148,9 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=

@@ -184,7 +178,6 @@ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0=
github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=

@@ -203,7 +196,6 @@ github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2 h1:JVX6jT/XfzNqIjye47
github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A=
github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=

@@ -219,7 +211,6 @@ github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCM
github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
github.com/jackc/pgtype v1.3.0 h1:l8JvKrby3RI7Kg3bYEeU9TA4vqC38QDpFCfcrC7KuN0=
github.com/jackc/pgtype v1.3.0/go.mod h1:b0JqxHvPmljG+HQ5IsvQ0yqeSi4nGcDTVjFoiLDb0Ik=
github.com/jackc/pgx v3.6.2+incompatible h1:2zP5OD7kiyR3xzRYMhOcXVvkDZsImVXfj+yIyTQf3/o=
github.com/jackc/pgx v3.6.2+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I=
github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=

@@ -254,10 +245,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=

@@ -682,8 +669,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.5.2 h1:g0WBLy6fobNUU8W/e9zx6I0Yl79Ya+BDW1NwzAlTiiQ=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.5.2/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 h1:+RlmciBLDd/XwM1iudiG3HtCg45purnsOxEoY/+JZdQ=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -28,30 +28,6 @@ func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
    return c.Conn.InsertIssueEvent(sessionID, session.ProjectID, crash)
}

func (c *PGCache) InsertUserID(sessionID uint64, userID *IOSUserID) error {
    if err := c.Conn.InsertIOSUserID(sessionID, userID); err != nil {
        return err
    }
    session, err := c.GetSession(sessionID)
    if err != nil {
        return err
    }
    session.UserID = &userID.Value
    return nil
}

func (c *PGCache) InsertUserAnonymousID(sessionID uint64, userAnonymousID *IOSUserAnonymousID) error {
    if err := c.Conn.InsertIOSUserAnonymousID(sessionID, userAnonymousID); err != nil {
        return err
    }
    session, err := c.GetSession(sessionID)
    if err != nil {
        return err
    }
    session.UserAnonymousID = &userAnonymousID.Value
    return nil
}

func (c *PGCache) InsertMetadata(sessionID uint64, metadata *Metadata) error {
    session, err := c.GetSession(sessionID)
    if err != nil {
@@ -1,228 +0,0 @@
package postgres

import (
    "database/sql"
    "errors"
    "fmt"
    sq "github.com/Masterminds/squirrel"
    "log"
    "strconv"
    "time"
)

type TimeString sql.NullString
type query struct {
    Left     string  `db:"query.left" json:"left"`
    Operator string  `db:"query.operator" json:"operator"`
    Right    float64 `db:"query.right" json:"right"`
}
type options struct {
    RenotifyInterval int64               `db:"options.renotifyInterval" json:"renotifyInterval"`
    LastNotification int64               `db:"options.lastNotification" json:"lastNotification;omitempty"`
    CurrentPeriod    int64               `db:"options.currentPeriod" json:"currentPeriod"`
    PreviousPeriod   int64               `db:"options.previousPeriod" json:"previousPeriod;omitempty"`
    Message          []map[string]string `db:"options.message" json:"message;omitempty"`
    Change           string              `db:"options.change" json:"change;omitempty"`
}
type Alert struct {
    AlertID         uint32         `db:"alert_id" json:"alert_id"`
    ProjectID       uint32         `db:"project_id" json:"project_id"`
    Name            string         `db:"name" json:"name"`
    Description     sql.NullString `db:"description" json:"description"`
    Active          bool           `db:"active" json:"active"`
    DetectionMethod string         `db:"detection_method" json:"detection_method"`
    Query           query          `db:"query" json:"query"`
    DeletedAt       *int64         `db:"deleted_at" json:"deleted_at"`
    CreatedAt       *int64         `db:"created_at" json:"created_at"`
    Options         options        `db:"options" json:"options"`
    TenantId        uint32         `db:"tenant_id" json:"tenant_id"`
}

func (pg *Conn) IterateAlerts(iter func(alert *Alert, err error)) error {
    rows, err := pg.query(`
        SELECT
            alerts.alert_id,
            alerts.project_id,
            alerts.name,
            alerts.description,
            alerts.active,
            alerts.detection_method,
            alerts.query,
            CAST(EXTRACT(epoch FROM alerts.deleted_at) * 1000 AS BIGINT) AS deleted_at,
            CAST(EXTRACT(epoch FROM alerts.created_at) * 1000 AS BIGINT) AS created_at,
            alerts.options,
            0 AS tenant_id
        FROM public.alerts
        WHERE alerts.active AND alerts.deleted_at ISNULL;
    `)
    if err != nil {
        return err
    }
    defer rows.Close()
    for rows.Next() {
        a := new(Alert)
        if err = rows.Scan(
            &a.AlertID,
            &a.ProjectID,
            &a.Name,
            &a.Description,
            &a.Active,
            &a.DetectionMethod,
            &a.Query,
            &a.DeletedAt,
            &a.CreatedAt,
            &a.Options,
            &a.TenantId,
        ); err != nil {
            iter(nil, err)
            continue
        }
        iter(a, nil)
    }

    if err = rows.Err(); err != nil {
        return err
    }
    return nil
}

func (pg *Conn) SaveLastNotification(allIds []uint32) error {
    var paramrefs string
    for _, v := range allIds {
        paramrefs += strconv.Itoa(int(v)) + `,`
    }
    paramrefs = paramrefs[:len(paramrefs)-1] // remove last ","
    q := "UPDATE public.Alerts SET options = options||'{\"lastNotification\":" + strconv.Itoa(int(time.Now().Unix()*1000)) + "}'::jsonb WHERE alert_id IN (" + paramrefs + ");"
    //log.Println(q)
    log.Println("Updating PG")
    return pg.exec(q)
}

type columnDefinition struct {
    table     string
    formula   string
    condition string
    group     string
}

var LeftToDb = map[string]columnDefinition{
    "performance.dom_content_loaded.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
    "performance.first_meaningful_paint.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
    "performance.page_load_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(load_time ,0))"},
    "performance.dom_build_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(dom_building_time,0))"},
    "performance.speed_index.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(speed_index,0))"},
    "performance.page_response_time.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(response_time,0))"},
    "performance.ttfb.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(first_paint_time,0))"},
    "performance.time_to_render.average": {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(visually_complete,0))"},
    "performance.image_load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='img'"},
    "performance.request_load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='fetch'"},
    "resources.load_time.average": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))"},
    "resources.missing.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT url_hostpath)", condition: "success= FALSE"},
    "errors.4xx_5xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100!=2"},
    "errors.4xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=4"},
    "errors.5xx.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=5"},
    "errors.javascript.impacted_sessions.count": {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT session_id)", condition: "success= FALSE AND type='script'"},
    "performance.crashes.count": {table: "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions", formula: "COUNT(DISTINCT session_id)", condition: "errors_count > 0"},
    "errors.javascript.count": {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source='js_exception'"},
    "errors.backend.count": {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source!='js_exception'"},
}

//This is the frequency of execution for each threshold
var TimeInterval = map[int64]int64{
    15:   3,
    30:   5,
    60:   10,
    120:  20,
    240:  30,
    1440: 60,
}

func (a *Alert) CanCheck() bool {
    now := time.Now().Unix() * 1000
    var repetitionBase int64

    if repetitionBase = a.Options.CurrentPeriod; a.DetectionMethod == "change" && a.Options.CurrentPeriod > a.Options.PreviousPeriod {
        repetitionBase = a.Options.PreviousPeriod
    }

    if _, ok := TimeInterval[repetitionBase]; !ok {
        log.Printf("repetitionBase: %d NOT FOUND", repetitionBase)
        return false
    }
    return a.DeletedAt == nil && a.Active &&
        (a.Options.RenotifyInterval <= 0 ||
            a.Options.LastNotification <= 0 ||
            ((now - a.Options.LastNotification) > a.Options.RenotifyInterval*60*1000)) &&
        ((now-*a.CreatedAt)%(TimeInterval[repetitionBase]*60*1000)) < 60*1000
}

func (a *Alert) Build() (sq.SelectBuilder, error) {
    colDef, ok := LeftToDb[a.Query.Left]
    if !ok {
        return sq.Select(), errors.New(fmt.Sprintf("!! unsupported metric '%s' from alert: %d:%s\n", a.Query.Left, a.AlertID, a.Name))
    }

    subQ := sq.
        Select(colDef.formula + " AS value").
        From(colDef.table).
        Where(sq.And{sq.Expr("project_id = $1 ", a.ProjectID),
            sq.Expr(colDef.condition)})
    q := sq.Select(fmt.Sprint("value, coalesce(value,0)", a.Query.Operator, a.Query.Right, " AS valid"))
    if len(colDef.group) > 0 {
        subQ = subQ.Column(colDef.group + " AS group_value")
        subQ = subQ.GroupBy(colDef.group)
        q = q.Column("group_value")
    }

    if a.DetectionMethod == "threshold" {
        q = q.FromSelect(subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)), "stat")
    } else if a.DetectionMethod == "change" {
        if a.Options.Change == "change" {
            if len(colDef.group) == 0 {
                sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")-(" + sub2 + ")) AS value").ToSql()
                q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
            } else {
                subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60))
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
                q = q.FromSelect(sub1, "stat")
            }
        } else if a.Options.Change == "percent" {
            if len(colDef.group) == 0 {
                sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")/(" + sub2 + ")-1)*100 AS value").ToSql()
                q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
            } else {
                subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60))
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
                q = q.FromSelect(sub1, "stat")
            }
        } else {
            return q, errors.New("unsupported change method")
        }

    } else {
        return q, errors.New("unsupported detection method")
    }
    return q, nil
}
@@ -11,7 +11,6 @@ import (
type Listener struct {
    conn         *pgx.Conn
    Integrations chan *Integration
    Alerts       chan *Alert
    Errors       chan error
}

@@ -32,23 +31,6 @@ func NewIntegrationsListener(url string) (*Listener, error) {
    return listener, nil
}

func NewAlertsListener(url string) (*Listener, error) {
    conn, err := pgx.Connect(context.Background(), url)
    if err != nil {
        return nil, err
    }
    listener := &Listener{
        conn:   conn,
        Errors: make(chan error),
    }
    listener.Alerts = make(chan *Alert, 50)
    if _, err := conn.Exec(context.Background(), "LISTEN alert"); err != nil {
        return nil, err
    }
    go listener.listen()
    return listener, nil
}

func (listener *Listener) listen() {
    for {
        notification, err := listener.conn.WaitForNotification(context.Background())

@@ -64,13 +46,6 @@ func (listener *Listener) listen() {
            } else {
                listener.Integrations <- integrationP
            }
        case "alert":
            alertP := new(Alert)
            if err := json.Unmarshal([]byte(notification.Payload), alertP); err != nil {
                listener.Errors <- fmt.Errorf("%v | Payload: %v", err, notification.Payload)
            } else {
                listener.Alerts <- alertP
            }
        }
    }
}
@@ -1,15 +1,14 @@
package queue

import (
    "openreplay/backend/pkg/redisstream"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/pkg/redisstream"
)

func NewConsumer(group string, topics []string, handler types.MessageHandler) types.Consumer {
func NewConsumer(group string, topics []string, handler types.MessageHandler, _ bool) types.Consumer {
    return redisstream.NewConsumer(group, topics, handler)
}

func NewProducer() types.Producer {
    return redisstream.NewProducer()
}

@@ -7,13 +7,12 @@ import (
    "openreplay/backend/pkg/queue/types"
)

func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler) types.Consumer {
func NewMessageConsumer(group string, topics []string, handler types.DecodedMessageHandler, autoCommit bool) types.Consumer {
    return NewConsumer(group, topics, func(sessionID uint64, value []byte, meta *types.Meta) {
        if err := messages.ReadBatch(value, func(msg messages.Message) {
            handler(sessionID, msg, meta)
        }); err != nil {
            log.Printf("Decode error: %v\n", err)
        }
    })
    }, autoCommit)
}
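With this change the auto-commit decision moves into the constructor: the new trailing argument replaces the old DisableAutoCommit() method (removed further down), and the redis-backed NewConsumer simply ignores it (`_ bool`). A sketch of the calling convention, modeled on the cacher and ender mains later in this diff (group and topic names here are placeholders):

consumer := queue.NewMessageConsumer(
    "example-group",           // placeholder consumer group
    []string{"example-topic"}, // placeholder topic list
    func(sessionID uint64, msg messages.Message, meta *types.Meta) {
        // handle the decoded message ...
    },
    false, // autoCommit: the caller commits explicitly via Commit()/CommitBack()
)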
@@ -6,26 +6,22 @@ import (

type Consumer interface {
    ConsumeNext() error
    DisableAutoCommit()
    Commit() error
    CommitBack(gap int64) error
    Close()
}

type Producer interface {
    Produce(topic string, key uint64, value []byte) error
    Close(timeout int)
    Flush(timeout int)
}

type Meta struct {
    ID uint64
    Topic string
    ID        uint64
    Topic     string
    Timestamp int64
}

type MessageHandler func(uint64, []byte, *Meta)
type DecodedMessageHandler func(uint64, messages.Message, *Meta)
@@ -1,24 +1,22 @@
package redisstream

import (
    "log"
    "net"
    "sort"
    "strconv"
    "strings"
    "log"
    "sort"
    "time"

    "github.com/pkg/errors"
    _redis "github.com/go-redis/redis"
    "github.com/pkg/errors"

    "openreplay/backend/pkg/queue/types"
)

type idsInfo struct{
    id []string
    ts []int64
type idsInfo struct {
    id []string
    ts []int64
}
type streamPendingIDsMap map[string]*idsInfo

@@ -41,26 +39,25 @@ func NewConsumer(group string, streams []string, messageHandler types.MessageHan
        }
    }

    idsPending := make(streamPendingIDsMap)

    streamsCount := len(streams)
    for i := 0; i < streamsCount; i++ {
        // ">" is for never-delivered messages.
        // Otherwise - never acknoledged only
        // ">" is for never-delivered messages.
        // Otherwise - never acknoledged only
        // TODO: understand why in case of "0" it eats 100% cpu
        streams = append(streams, ">")

        streams = append(streams, ">")

        idsPending[streams[i]] = new(idsInfo)
    }

    return &Consumer{
        redis: redis,
        redis:          redis,
        messageHandler: messageHandler,
        streams: streams,
        group: group,
        autoCommit: true,
        idsPending: idsPending,
        streams:        streams,
        group:          group,
        autoCommit:     true,
        idsPending:     idsPending,
    }
}

@@ -106,9 +103,9 @@ func (c *Consumer) ConsumeNext() error {
        return errors.New("Too many messages per ms in redis")
    }
    c.messageHandler(sessionID, []byte(valueString), &types.Meta{
        Topic: r.Stream,
        Topic:     r.Stream,
        Timestamp: int64(ts),
        ID: ts << 13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years
        ID:        ts<<13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years
    })
    if c.autoCommit {
        if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil {

@@ -119,7 +116,7 @@ func (c *Consumer) ConsumeNext() error {
            c.idsPending[r.Stream].id = append(c.idsPending[r.Stream].id, m.ID)
            c.idsPending[r.Stream].ts = append(c.idsPending[r.Stream].ts, int64(ts))
        }

    }
    }
    return nil

@@ -158,13 +155,9 @@ func (c *Consumer) CommitBack(gap int64) error {
        c.idsPending[stream].id = idsInfo.id[maxI:]
        c.idsPending[stream].ts = idsInfo.ts[maxI:]
    }
    return nil
}

func (c *Consumer) DisableAutoCommit() {
    //c.autoCommit = false
    return nil
}

func (c *Consumer) Close() {
    // noop
}
}
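The reformatted ID line above is the one behavioural landmark in this file: the millisecond timestamp of the redis stream entry is packed into the high bits and the per-millisecond index into the low 13 bits (`ts<<13 | (idx & 0x1FFF)`). Decoding is the mirror image; a small sketch (the function name is mine, not the repository's):

// splitMetaID recovers the two fields packed into types.Meta.ID by ConsumeNext.
func splitMetaID(id uint64) (ts uint64, idx uint64) {
    ts = id >> 13     // millisecond timestamp taken from the stream entry ID
    idx = id & 0x1FFF // per-millisecond message index (low 13 bits)
    return ts, idx
}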
@@ -22,8 +22,8 @@ func NewTokenizer(secret string) *Tokenizer {
}

type TokenData struct {
    ID uint64
    ExpTime int64
    ID      uint64
    ExpTime int64
}

func (tokenizer *Tokenizer) sign(body string) []byte {

@@ -33,7 +33,7 @@ func (tokenizer *Tokenizer) sign(body string) []byte {
}

func (tokenizer *Tokenizer) Compose(d TokenData) string {
    body := strconv.FormatUint(d.ID, 36) +
    body := strconv.FormatUint(d.ID, 36) +
        "." + strconv.FormatInt(d.ExpTime, 36)
    sign := base58.Encode(tokenizer.sign(body))
    return body + "." + sign

@@ -58,8 +58,8 @@ func (tokenizer *Tokenizer) Parse(token string) (*TokenData, error) {
    if err != nil {
        return nil, err
    }
    if expTime <= time.Now().UnixNano()/1e6 {
        return &TokenData{id,expTime}, EXPIRED
    if expTime <= time.Now().UnixMilli() {
        return &TokenData{id, expTime}, EXPIRED
    }
    return &TokenData{id,expTime}, nil
    return &TokenData{id, expTime}, nil
}
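time.Time.UnixMilli was added in Go 1.17, which is why this cleanup accompanies the go 1.18 bump at the top of the commit; the two spellings seen throughout the diff are equivalent:

now := time.Now()
a := now.UnixNano() / 1e6 // previous spelling: nanoseconds truncated to milliseconds
b := now.UnixMilli()      // Go 1.17+ equivalent, clearer intent
fmt.Println(a == b)       // true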
@@ -5,11 +5,18 @@ import (
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "openreplay/backend/pkg/flakeid"
)

func getSessionKey(sessionID uint64) string {
    // Based on timestamp, changes once per week. Check pkg/flakeid for understanding sessionID
    return strconv.FormatUint(sessionID>>50, 10)
    return strconv.FormatUint(
        uint64(time.UnixMilli(
            int64(flakeid.ExtractTimestamp(sessionID)),
        ).Weekday()),
        10,
    )
}

func ResolveURL(baseurl string, rawurl string) string {
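The session cache key changes meaning here: the old key was the raw top bits of the flake ID (`sessionID>>50`, which per the old comment rolls over roughly weekly), while the new one is the weekday of the session's creation time, so it cycles through at most seven values. Assuming flakeid.ExtractTimestamp returns that creation time in milliseconds, which is how it is used above, an equivalent standalone sketch (helper name is hypothetical):

// sessionWeekdayKey matches the new getSessionKey body.
func sessionWeekdayKey(sessionID uint64) string {
    ms := int64(flakeid.ExtractTimestamp(sessionID)) // assumption: milliseconds since epoch
    day := time.UnixMilli(ms).Weekday()              // time.Sunday == 0 ... time.Saturday == 6
    return strconv.FormatUint(uint64(day), 10)
}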
@@ -1,11 +0,0 @@
package utime

import "time"

func CurrentTimestamp() int64 {
    return time.Now().UnixNano() / 1e6
}

func ToMilliseconds(t time.Time) int64 {
    return t.UnixNano() / 1e6
}
@@ -18,7 +18,7 @@ import (
func main() {
    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    GROUP_CACHE := env.String("GROUP_CACHE")
    GROUP_CACHE := env.String("GROUP_CACHE")
    TOPIC_CACHE := env.String("TOPIC_CACHE")

    cacher := cacher.NewCacher(

@@ -29,10 +29,10 @@ func main() {
    )

    consumer := queue.NewMessageConsumer(
        GROUP_CACHE,
        []string{ TOPIC_CACHE },
        GROUP_CACHE,
        []string{TOPIC_CACHE},
        func(sessionID uint64, message messages.Message, e *types.Meta) {
            switch msg := message.(type) {
            switch msg := message.(type) {
            case *messages.AssetCache:
                cacher.CacheURL(sessionID, msg.URL)
            case *messages.ErrorEvent:

@@ -47,17 +47,17 @@ func main() {
                for _, source := range sourceList {
                    cacher.CacheJSFile(source)
                }
            }
            }
        },
        true,
    )

    tick := time.Tick(20 * time.Minute)

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

    log.Printf("Cacher service started\n")
    log.Printf("Cacher service started\n")
    for {
        select {
        case sig := <-sigchan:

@@ -74,4 +74,4 @@ func main() {
        }
    }
}
}
}
@@ -74,8 +74,8 @@ func main() {
            }
        })
        },
        false,
    )
    consumer.DisableAutoCommit()

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
@@ -110,11 +110,11 @@ func (b *builder) buildInputEvent() {

func (b *builder) handleMessage(message Message, messageID uint64) {
    timestamp := GetTimestamp(message)
    if b.timestamp <= timestamp { // unnecessary? TODO: test and remove
    if b.timestamp < timestamp { // unnecessary? TODO: test and remove
        b.timestamp = timestamp
    }

    b.lastProcessedTimestamp = time.Now().UnixNano() / 1e6
    b.lastProcessedTimestamp = time.Now().UnixMilli()

    // Might happen before the first timestamp.
    switch msg := message.(type) {
@@ -8,12 +8,12 @@ import (
    "os/signal"
    "syscall"

    "openreplay/backend/pkg/intervals"
    "openreplay/backend/pkg/env"
    "openreplay/backend/pkg/intervals"
    logger "openreplay/backend/pkg/log"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/queue/types"
    logger "openreplay/backend/pkg/log"
    "openreplay/backend/services/ender/builder"
)

@@ -29,24 +29,24 @@ func main() {

    producer := queue.NewProducer()
    consumer := queue.NewMessageConsumer(
        GROUP_EVENTS,
        []string{
        GROUP_EVENTS,
        []string{
            env.String("TOPIC_RAW_WEB"),
            env.String("TOPIC_RAW_IOS"),
        },
        },
        func(sessionID uint64, msg messages.Message, meta *types.Meta) {
            statsLogger.HandleAndLog(sessionID, meta)
            builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
        },
        false,
    )
    consumer.DisableAutoCommit()


    tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

    log.Printf("Ender service started\n")
    log.Printf("Ender service started\n")
    for {
        select {
        case sig := <-sigchan:

@@ -55,8 +55,8 @@ func main() {
            consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP)
            consumer.Close()
            os.Exit(0)
        case <- tick:
            builderMap.IterateReadyMessages(time.Now().UnixNano()/1e6, func(sessionID uint64, readyMsg messages.Message) {
        case <-tick:
            builderMap.IterateReadyMessages(time.Now().UnixMilli(), func(sessionID uint64, readyMsg messages.Message) {
                producer.Produce(TOPIC_TRIGGER, sessionID, messages.Encode(readyMsg))
            })
            // TODO: why exactly do we need Flush here and not in any other place?

@@ -69,4 +69,3 @@ func main() {
        }
    }
}
@@ -2,55 +2,55 @@ package main

import (
    "encoding/json"
    "net/http"
    "errors"
    "time"
    "math/rand"
    "strconv"
    "log"
    "math/rand"
    "net/http"
    "strconv"
    "time"

    "openreplay/backend/pkg/db/postgres"
    "openreplay/backend/pkg/token"
    . "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/token"
)

const FILES_SIZE_LIMIT int64 = 1e7 // 10Mb
const FILES_SIZE_LIMIT int64 = 1e7 // 10Mb

func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
    type request struct {
        Token string `json:"token"`
        ProjectKey *string `json:"projectKey"`
        TrackerVersion string `json:"trackerVersion"`
        RevID string `json:"revID"`
        UserUUID *string `json:"userUUID"`
        Token          string  `json:"token"`
        ProjectKey     *string `json:"projectKey"`
        TrackerVersion string  `json:"trackerVersion"`
        RevID          string  `json:"revID"`
        UserUUID       *string `json:"userUUID"`
        //UserOS string `json"userOS"` //hardcoded 'MacOS'
        UserOSVersion string `json:"userOSVersion"`
        UserDevice string `json:"userDevice"`
        Timestamp uint64 `json:"timestamp"`
        UserOSVersion string `json:"userOSVersion"`
        UserDevice    string `json:"userDevice"`
        Timestamp     uint64 `json:"timestamp"`
        // UserDeviceType uint 0:phone 1:pad 2:tv 3:carPlay 5:mac
        // “performances”:{
        //   “activeProcessorCount”:8,
        //   “isLowPowerModeEnabled”:0,
        //   “orientation”:0,
        //   “systemUptime”:585430,
        //   “batteryState”:0,
        //   “thermalState”:0,
        //   “batteryLevel”:0,
        //   “processorCount”:8,
        //   “physicalMemory”:17179869184
        // },
        // “activeProcessorCount”:8,
        // “isLowPowerModeEnabled”:0,
        // “orientation”:0,
        // “systemUptime”:585430,
        // “batteryState”:0,
        // “thermalState”:0,
        // “batteryLevel”:0,
        // “processorCount”:8,
        // “physicalMemory”:17179869184
        // },
    }
    type response struct {
        Token string `json:"token"`
        ImagesHashList []string `json:"imagesHashList"`
        UserUUID string `json:"userUUID"`
        BeaconSizeLimit int64 `json:"beaconSizeLimit"`
        SessionID string `json:"sessionID"`
        Token           string   `json:"token"`
        ImagesHashList  []string `json:"imagesHashList"`
        UserUUID        string   `json:"userUUID"`
        BeaconSizeLimit int64    `json:"beaconSizeLimit"`
        SessionID       string   `json:"sessionID"`
    }
    startTime := time.Now()
    req := &request{}
    body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT)
    //defer body.Close()
    defer body.Close()
    if err := json.NewDecoder(body).Decode(req); err != nil {
        responseWithError(w, http.StatusBadRequest, err)
        return

@@ -85,29 +85,29 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
        responseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
        return
    }
    sessionID, err := flaker.Compose(uint64(startTime.UnixNano() / 1e6))
    sessionID, err := flaker.Compose(uint64(startTime.UnixMilli()))
    if err != nil {
        responseWithError(w, http.StatusInternalServerError, err)
        return
    }
    // TODO: if EXPIRED => send message for two sessions association
    expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
    tokenData = &token.TokenData{sessionID, expTime.UnixNano() / 1e6}
    tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}

    country := geoIP.ExtractISOCodeFromHTTPRequest(r)

    // The difference with web is mostly here:
    producer.Produce(TOPIC_RAW_IOS, tokenData.ID, Encode(&IOSSessionStart{
        Timestamp: req.Timestamp,
        ProjectID: uint64(p.ProjectID),
        TrackerVersion: req.TrackerVersion,
        RevID: req.RevID,
        UserUUID: userUUID,
        UserOS: "IOS",
        UserOSVersion: req.UserOSVersion,
        UserDevice: MapIOSDevice(req.UserDevice),
        UserDeviceType: GetIOSDeviceType(req.UserDevice),
        UserCountry: country,
        Timestamp:      req.Timestamp,
        ProjectID:      uint64(p.ProjectID),
        TrackerVersion: req.TrackerVersion,
        RevID:          req.RevID,
        UserUUID:       userUUID,
        UserOS:         "IOS",
        UserOSVersion:  req.UserOSVersion,
        UserDevice:     MapIOSDevice(req.UserDevice),
        UserDeviceType: GetIOSDeviceType(req.UserDevice),
        UserCountry:    country,
    }))
}

@@ -119,14 +119,13 @@ func startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {

    responseWithJSON(w, &response{
        // ImagesHashList: imagesHashList,
        Token: tokenizer.Compose(*tokenData),
        UserUUID: userUUID,
        SessionID: strconv.FormatUint(tokenData.ID, 10),
        Token:           tokenizer.Compose(*tokenData),
        UserUUID:        userUUID,
        SessionID:       strconv.FormatUint(tokenData.ID, 10),
        BeaconSizeLimit: BEACON_SIZE_LIMIT,
    })
}

func pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
    sessionData, err := tokenizer.ParseFromHTTPRequest(r)
    if err != nil {

@@ -136,8 +135,6 @@ func pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
    pushMessages(w, r, sessionData.ID, TOPIC_RAW_IOS)
}



func pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
    sessionData, err := tokenizer.ParseFromHTTPRequest(r)
    if err != nil && err != token.EXPIRED {

@@ -145,10 +142,9 @@ func pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
        return
    }
    // Check timestamps here?
    pushMessages(w, r, sessionData.ID,TOPIC_RAW_IOS)
    pushMessages(w, r, sessionData.ID, TOPIC_RAW_IOS)
}

func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
    log.Printf("recieved imagerequest")

@@ -159,16 +155,16 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
    }

    r.Body = http.MaxBytesReader(w, r.Body, FILES_SIZE_LIMIT)
    // defer r.Body.Close()
    defer r.Body.Close()
    err = r.ParseMultipartForm(1e6) // ~1Mb
    if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
        responseWithError(w, http.StatusUnsupportedMediaType, err)
        // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
        // } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
    } else if err != nil {
        responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
    }

    if (r.MultipartForm == nil) {
    if r.MultipartForm == nil {
        responseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"))
    }

@@ -177,7 +173,7 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
        return
    }

    prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"
    prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"

    for _, fileHeaderList := range r.MultipartForm.File {
        for _, fileHeader := range fileHeaderList {

@@ -187,7 +183,7 @@ func imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
            }
            key := prefix + fileHeader.Filename
            log.Printf("Uploading image... %v", key)
            go func() { //TODO: mime type from header
            go func() { //TODO: mime type from header
                if err := s3.Upload(file, key, "image/jpeg", false); err != nil {
                    log.Printf("Upload ios screen error. %v", err)
                }
@ -11,8 +11,8 @@ import (
|
|||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/token"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/token"
|
||||
)
|
||||
|
||||
func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
||||
|
|
@ -30,18 +30,18 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
UserID string `json:"userID"`
|
||||
}
|
||||
type response struct {
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Delay int64 `json:"delay"`
|
||||
Token string `json:"token"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
SessionID string `json:"sessionID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Delay int64 `json:"delay"`
|
||||
Token string `json:"token"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
SessionID string `json:"sessionID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
req := &request{}
|
||||
body := http.MaxBytesReader(w, r.Body, JSON_SIZE_LIMIT) // what if Body == nil?? // use r.ContentLength to return specific error?
|
||||
//defer body.Close()
|
||||
defer body.Close()
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
responseWithError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
|
|
@ -76,14 +76,14 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
responseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
sessionID, err := flaker.Compose(uint64(startTime.UnixNano() / 1e6))
|
||||
sessionID, err := flaker.Compose(uint64(startTime.UnixMilli()))
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixNano() / 1e6}
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}
|
||||
|
||||
country := geoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
producer.Produce(TOPIC_RAW_WEB, tokenData.ID, Encode(&SessionStart{
|
||||
|
|
@ -102,17 +102,17 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
UserCountry: country,
|
||||
UserDeviceMemorySize: req.DeviceMemory,
|
||||
UserDeviceHeapSize: req.JsHeapSizeLimit,
|
||||
UserID: req.UserID,
|
||||
UserID: req.UserID,
|
||||
}))
|
||||
}
|
||||
|
||||
//delayDuration := time.Now().Sub(startTime)
|
||||
responseWithJSON(w, &response{
|
||||
//Timestamp: startTime.UnixNano() / 1e6,
|
||||
//Delay: delayDuration.Nanoseconds() / 1e6,
|
||||
Token: tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
//Timestamp: startTime.UnixMilli(),
|
||||
//Delay: delayDuration.Milliseconds(),
|
||||
Token: tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
BeaconSizeLimit: BEACON_SIZE_LIMIT,
|
||||
})
|
||||
}
|
||||
|
|
@ -124,7 +124,7 @@ func pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT)
|
||||
//defer body.Close()
|
||||
defer body.Close()
|
||||
buf, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
|
|
@ -248,4 +248,4 @@ func notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
|||
log.Printf("Unable to insert Unstarted Session: %v\n", err)
|
||||
}
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
}
|
||||
|
|
@ -9,11 +9,11 @@ import (
|
|||
gzip "github.com/klauspost/pgzip"
|
||||
)
|
||||
|
||||
const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb
|
||||
const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb
|
||||
|
||||
func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
|
||||
body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT)
|
||||
//defer body.Close()
|
||||
defer body.Close()
|
||||
var reader io.ReadCloser
|
||||
var err error
|
||||
switch r.Header.Get("Content-Encoding") {
|
||||
|
|
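pushMessages above caps the request body with http.MaxBytesReader before choosing a decoder from Content-Encoding. A self-contained sketch of that pattern, using the standard compress/gzip in place of the klauspost/pgzip import above, and with an invented 1 MB limit:

package main

import (
	"compress/gzip"
	"io"
	"net/http"
)

const bodyLimit int64 = 1 << 20 // assumed 1 MB cap for the sketch

func handle(w http.ResponseWriter, r *http.Request) {
	// Reject bodies larger than the limit instead of buffering them all.
	body := http.MaxBytesReader(w, r.Body, bodyLimit)
	defer body.Close()

	var reader io.ReadCloser = body
	// Decompress only when the client declares a gzipped payload.
	if r.Header.Get("Content-Encoding") == "gzip" {
		gz, err := gzip.NewReader(body)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		defer gz.Close()
		reader = gz
	}
	if _, err := io.Copy(io.Discard, reader); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.ListenAndServe(":8080", http.HandlerFunc(handle))
}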
|
|||
138
backend/services/http/ios-device.go
Normal file
|
|
@ -0,0 +1,138 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func MapIOSDevice(identifier string) string {
|
||||
switch identifier {
|
||||
case "iPod5,1":
|
||||
return "iPod touch (5th generation)"
|
||||
case "iPod7,1":
|
||||
return "iPod touch (6th generation)"
|
||||
case "iPod9,1":
|
||||
return "iPod touch (7th generation)"
|
||||
case "iPhone3,1", "iPhone3,2", "iPhone3,3":
|
||||
return "iPhone 4"
|
||||
case "iPhone4,1":
|
||||
return "iPhone 4s"
|
||||
case "iPhone5,1", "iPhone5,2":
|
||||
return "iPhone 5"
|
||||
case "iPhone5,3", "iPhone5,4":
|
||||
return "iPhone 5c"
|
||||
case "iPhone6,1", "iPhone6,2":
|
||||
return "iPhone 5s"
|
||||
case "iPhone7,2":
|
||||
return "iPhone 6"
|
||||
case "iPhone7,1":
|
||||
return "iPhone 6 Plus"
|
||||
case "iPhone8,1":
|
||||
return "iPhone 6s"
|
||||
case "iPhone8,2":
|
||||
return "iPhone 6s Plus"
|
||||
case "iPhone8,4":
|
||||
return "iPhone SE"
|
||||
case "iPhone9,1", "iPhone9,3":
|
||||
return "iPhone 7"
|
||||
case "iPhone9,2", "iPhone9,4":
|
||||
return "iPhone 7 Plus"
|
||||
case "iPhone10,1", "iPhone10,4":
|
||||
return "iPhone 8"
|
||||
case "iPhone10,2", "iPhone10,5":
|
||||
return "iPhone 8 Plus"
|
||||
case "iPhone10,3", "iPhone10,6":
|
||||
return "iPhone X"
|
||||
case "iPhone11,2":
|
||||
return "iPhone XS"
|
||||
case "iPhone11,4", "iPhone11,6":
|
||||
return "iPhone XS Max"
|
||||
case "iPhone11,8":
|
||||
return "iPhone XR"
|
||||
case "iPhone12,1":
|
||||
return "iPhone 11"
|
||||
case "iPhone12,3":
|
||||
return "iPhone 11 Pro"
|
||||
case "iPhone12,5":
|
||||
return "iPhone 11 Pro Max"
|
||||
case "iPhone12,8":
|
||||
return "iPhone SE (2nd generation)"
|
||||
case "iPhone13,1":
|
||||
return "iPhone 12 mini"
|
||||
case "iPhone13,2":
|
||||
return "iPhone 12"
|
||||
case "iPhone13,3":
|
||||
return "iPhone 12 Pro"
|
||||
case "iPhone13,4":
|
||||
return "iPhone 12 Pro Max"
|
||||
case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":
|
||||
return "iPad 2"
|
||||
case "iPad3,1", "iPad3,2", "iPad3,3":
|
||||
return "iPad (3rd generation)"
|
||||
case "iPad3,4", "iPad3,5", "iPad3,6":
|
||||
return "iPad (4th generation)"
|
||||
case "iPad6,11", "iPad6,12":
|
||||
return "iPad (5th generation)"
|
||||
case "iPad7,5", "iPad7,6":
|
||||
return "iPad (6th generation)"
|
||||
case "iPad7,11", "iPad7,12":
|
||||
return "iPad (7th generation)"
|
||||
case "iPad11,6", "iPad11,7":
|
||||
return "iPad (8th generation)"
|
||||
case "iPad4,1", "iPad4,2", "iPad4,3":
|
||||
return "iPad Air"
|
||||
case "iPad5,3", "iPad5,4":
|
||||
return "iPad Air 2"
|
||||
case "iPad11,3", "iPad11,4":
|
||||
return "iPad Air (3rd generation)"
|
||||
case "iPad13,1", "iPad13,2":
|
||||
return "iPad Air (4th generation)"
|
||||
case "iPad2,5", "iPad2,6", "iPad2,7":
|
||||
return "iPad mini"
|
||||
case "iPad4,4", "iPad4,5", "iPad4,6":
|
||||
return "iPad mini 2"
|
||||
case "iPad4,7", "iPad4,8", "iPad4,9":
|
||||
return "iPad mini 3"
|
||||
case "iPad5,1", "iPad5,2":
|
||||
return "iPad mini 4"
|
||||
case "iPad11,1", "iPad11,2":
|
||||
return "iPad mini (5th generation)"
|
||||
case "iPad6,3", "iPad6,4":
|
||||
return "iPad Pro (9.7-inch)"
|
||||
case "iPad7,3", "iPad7,4":
|
||||
return "iPad Pro (10.5-inch)"
|
||||
case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4":
|
||||
return "iPad Pro (11-inch) (1st generation)"
|
||||
case "iPad8,9", "iPad8,10":
|
||||
return "iPad Pro (11-inch) (2nd generation)"
|
||||
case "iPad6,7", "iPad6,8":
|
||||
return "iPad Pro (12.9-inch) (1st generation)"
|
||||
case "iPad7,1", "iPad7,2":
|
||||
return "iPad Pro (12.9-inch) (2nd generation)"
|
||||
case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8":
|
||||
return "iPad Pro (12.9-inch) (3rd generation)"
|
||||
case "iPad8,11", "iPad8,12":
|
||||
return "iPad Pro (12.9-inch) (4th generation)"
|
||||
case "AppleTV5,3":
|
||||
return "Apple TV"
|
||||
case "AppleTV6,2":
|
||||
return "Apple TV 4K"
|
||||
case "AudioAccessory1,1":
|
||||
return "HomePod"
|
||||
case "AudioAccessory5,1":
|
||||
return "HomePod mini"
|
||||
case "i386", "x86_64":
|
||||
return "Simulator"
|
||||
default:
|
||||
return identifier
|
||||
}
|
||||
}
|
||||
|
||||
func GetIOSDeviceType(identifier string) string {
|
||||
if strings.Contains(identifier, "iPhone") {
|
||||
return "mobile" //"phone"
|
||||
}
|
||||
if strings.Contains(identifier, "iPad") {
|
||||
return "tablet"
|
||||
}
|
||||
return "other"
|
||||
}
|
||||
|
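The new ios-device.go above is a plain lookup from Apple hardware identifiers to marketing names, plus a coarse device-type bucket. Assuming the snippet is compiled into the same main package, usage looks like this; the expected outputs follow from the switch above:

package main

import "log"

func main() {
	// Identifiers and return values come from the table above;
	// "UnknownModel" exercises the default branch.
	log.Println(MapIOSDevice("iPhone12,8"))   // iPhone SE (2nd generation)
	log.Println(GetIOSDeviceType("iPad8,9"))  // tablet
	log.Println(MapIOSDevice("UnknownModel")) // falls through: UnknownModel
}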
|
@ -1,79 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func MapIOSDevice(identifier string) string {
|
||||
switch identifier {
|
||||
case "iPod5,1": return "iPod touch (5th generation)"
|
||||
case "iPod7,1": return "iPod touch (6th generation)"
|
||||
case "iPod9,1": return "iPod touch (7th generation)"
|
||||
case "iPhone3,1", "iPhone3,2", "iPhone3,3": return "iPhone 4"
|
||||
case "iPhone4,1": return "iPhone 4s"
|
||||
case "iPhone5,1", "iPhone5,2": return "iPhone 5"
|
||||
case "iPhone5,3", "iPhone5,4": return "iPhone 5c"
|
||||
case "iPhone6,1", "iPhone6,2": return "iPhone 5s"
|
||||
case "iPhone7,2": return "iPhone 6"
|
||||
case "iPhone7,1": return "iPhone 6 Plus"
|
||||
case "iPhone8,1": return "iPhone 6s"
|
||||
case "iPhone8,2": return "iPhone 6s Plus"
|
||||
case "iPhone8,4": return "iPhone SE"
|
||||
case "iPhone9,1", "iPhone9,3": return "iPhone 7"
|
||||
case "iPhone9,2", "iPhone9,4": return "iPhone 7 Plus"
|
||||
case "iPhone10,1", "iPhone10,4": return "iPhone 8"
|
||||
case "iPhone10,2", "iPhone10,5": return "iPhone 8 Plus"
|
||||
case "iPhone10,3", "iPhone10,6": return "iPhone X"
|
||||
case "iPhone11,2": return "iPhone XS"
|
||||
case "iPhone11,4", "iPhone11,6": return "iPhone XS Max"
|
||||
case "iPhone11,8": return "iPhone XR"
|
||||
case "iPhone12,1": return "iPhone 11"
|
||||
case "iPhone12,3": return "iPhone 11 Pro"
|
||||
case "iPhone12,5": return "iPhone 11 Pro Max"
|
||||
case "iPhone12,8": return "iPhone SE (2nd generation)"
|
||||
case "iPhone13,1": return "iPhone 12 mini"
|
||||
case "iPhone13,2": return "iPhone 12"
|
||||
case "iPhone13,3": return "iPhone 12 Pro"
|
||||
case "iPhone13,4": return "iPhone 12 Pro Max"
|
||||
case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":return "iPad 2"
|
||||
case "iPad3,1", "iPad3,2", "iPad3,3": return "iPad (3rd generation)"
|
||||
case "iPad3,4", "iPad3,5", "iPad3,6": return "iPad (4th generation)"
|
||||
case "iPad6,11", "iPad6,12": return "iPad (5th generation)"
|
||||
case "iPad7,5", "iPad7,6": return "iPad (6th generation)"
|
||||
case "iPad7,11", "iPad7,12": return "iPad (7th generation)"
|
||||
case "iPad11,6", "iPad11,7": return "iPad (8th generation)"
|
||||
case "iPad4,1", "iPad4,2", "iPad4,3": return "iPad Air"
|
||||
case "iPad5,3", "iPad5,4": return "iPad Air 2"
|
||||
case "iPad11,3", "iPad11,4": return "iPad Air (3rd generation)"
|
||||
case "iPad13,1", "iPad13,2": return "iPad Air (4th generation)"
|
||||
case "iPad2,5", "iPad2,6", "iPad2,7": return "iPad mini"
|
||||
case "iPad4,4", "iPad4,5", "iPad4,6": return "iPad mini 2"
|
||||
case "iPad4,7", "iPad4,8", "iPad4,9": return "iPad mini 3"
|
||||
case "iPad5,1", "iPad5,2": return "iPad mini 4"
|
||||
case "iPad11,1", "iPad11,2": return "iPad mini (5th generation)"
|
||||
case "iPad6,3", "iPad6,4": return "iPad Pro (9.7-inch)"
|
||||
case "iPad7,3", "iPad7,4": return "iPad Pro (10.5-inch)"
|
||||
case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4":return "iPad Pro (11-inch) (1st generation)"
|
||||
case "iPad8,9", "iPad8,10": return "iPad Pro (11-inch) (2nd generation)"
|
||||
case "iPad6,7", "iPad6,8": return "iPad Pro (12.9-inch) (1st generation)"
|
||||
case "iPad7,1", "iPad7,2": return "iPad Pro (12.9-inch) (2nd generation)"
|
||||
case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8":return "iPad Pro (12.9-inch) (3rd generation)"
|
||||
case "iPad8,11", "iPad8,12": return "iPad Pro (12.9-inch) (4th generation)"
|
||||
case "AppleTV5,3": return "Apple TV"
|
||||
case "AppleTV6,2": return "Apple TV 4K"
|
||||
case "AudioAccessory1,1": return "HomePod"
|
||||
case "AudioAccessory5,1": return "HomePod mini"
|
||||
case "i386", "x86_64": return "Simulator"
|
||||
default: return identifier
|
||||
}
|
||||
}
|
||||
|
||||
func GetIOSDeviceType(identifier string) string {
|
||||
if strings.Contains(identifier, "iPhone") {
|
||||
return "mobile" //"phone"
|
||||
}
|
||||
if strings.Contains(identifier, "iPad") {
|
||||
return "tablet"
|
||||
}
|
||||
return "other"
|
||||
}
|
||||
|
|
@ -10,19 +10,17 @@ import (
|
|||
|
||||
"golang.org/x/net/http2"
|
||||
|
||||
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/flakeid"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"openreplay/backend/pkg/token"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"openreplay/backend/services/http/geoip"
|
||||
"openreplay/backend/services/http/uaparser"
|
||||
|
||||
)
|
||||
|
||||
var rewriter *assets.Rewriter
|
||||
|
|
@ -38,6 +36,7 @@ var TOPIC_RAW_WEB string
|
|||
var TOPIC_RAW_IOS string
|
||||
var TOPIC_CACHE string
|
||||
var TOPIC_TRIGGER string
|
||||
|
||||
//var TOPIC_ANALYTICS string
|
||||
var CACHE_ASSESTS bool
|
||||
var BEACON_SIZE_LIMIT int64
|
||||
|
|
@ -53,7 +52,7 @@ func main() {
|
|||
TOPIC_TRIGGER = env.String("TOPIC_TRIGGER")
|
||||
//TOPIC_ANALYTICS = env.String("TOPIC_ANALYTICS")
|
||||
rewriter = assets.NewRewriter(env.String("ASSETS_ORIGIN"))
|
||||
pgconn = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000 * 60 * 20)
|
||||
pgconn = cache.NewPGCache(postgres.NewConn(env.String("POSTGRES_STRING")), 1000*60*20)
|
||||
defer pgconn.Close()
|
||||
s3 = storage.NewS3(env.String("AWS_REGION"), env.String("S3_BUCKET_IOS_IMAGES"))
|
||||
tokenizer = token.NewTokenizer(env.String("TOKEN_SECRET"))
|
||||
|
|
@ -70,7 +69,7 @@ func main() {
|
|||
Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// TODO: agree with specification
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "POST")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization")
|
||||
if r.Method == http.MethodOptions {
|
||||
|
|
@ -79,13 +78,12 @@ func main() {
|
|||
return
|
||||
}
|
||||
|
||||
log.Printf("Request: %v - %v ", r.Method, r.URL.Path)
|
||||
|
||||
log.Printf("Request: %v - %v ", r.Method, r.URL.Path)
|
||||
|
||||
switch r.URL.Path {
|
||||
case "/":
|
||||
w.WriteHeader(http.StatusOK)
|
||||
case "/v1/web/not-started":
|
||||
case "/v1/web/not-started":
|
||||
switch r.Method {
|
||||
case http.MethodPost:
|
||||
notStartedHandlerWeb(w, r)
|
||||
|
|
|
|||
|
|
@ -1,12 +0,0 @@
|
|||
package main
|
||||
|
||||
func decodeProjectID(projectID uint64) uint64 {
|
||||
if projectID < 0x10000000000000 || projectID >= 0x20000000000000 {
|
||||
return 0
|
||||
}
|
||||
projectID = (projectID - 0x10000000000000) * 4212451012670231 & 0xfffffffffffff
|
||||
if projectID > 0xffffffff {
|
||||
return 0
|
||||
}
|
||||
return projectID
|
||||
}
|
||||
|
|
@ -1,15 +1,14 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"encoding/json"
|
||||
"net/url"
|
||||
"time"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
|
@ -18,15 +17,14 @@ import (
|
|||
*/
|
||||
|
||||
type bugsnag struct {
|
||||
BugsnagProjectId string // `json:"bugsnag_project_id"`
|
||||
BugsnagProjectId string // `json:"bugsnag_project_id"`
|
||||
AuthorizationToken string // `json:"auth_token"`
|
||||
}
|
||||
|
||||
|
||||
type bugsnagEvent struct {
|
||||
MetaData struct {
|
||||
SpecialInfo struct {
|
||||
AsayerSessionId uint64 `json:"asayerSessionId,string"`
|
||||
AsayerSessionId uint64 `json:"asayerSessionId,string"`
|
||||
OpenReplaySessionToken string `json:"openReplaySessionToken"`
|
||||
} `json:"special_info"`
|
||||
} `json:"metaData"`
|
||||
|
|
@ -38,7 +36,7 @@ type bugsnagEvent struct {
|
|||
|
||||
func (b *bugsnag) Request(c *client) error {
|
||||
sinceTs := c.getLastMessageTimestamp() + 1000 // From next second
|
||||
sinceFormatted := time.Unix(0, int64(sinceTs*1e6)).Format(time.RFC3339)
|
||||
sinceFormatted := time.UnixMilli(int64(sinceTs)).Format(time.RFC3339)
|
||||
requestURL := fmt.Sprintf("https://api.bugsnag.com/projects/%v/events", b.BugsnagProjectId)
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
if err != nil {
|
||||
|
|
@ -47,10 +45,10 @@ func (b *bugsnag) Request(c *client) error {
|
|||
q := req.URL.Query()
|
||||
// q.Add("per_page", "100") // Up to a maximum of 30. Default: 30
|
||||
// q.Add("sort", "timestamp") // Default: timestamp (timestamp == ReceivedAt ??)
|
||||
q.Add("direction", "asc") // Default: desc
|
||||
q.Add("direction", "asc") // Default: desc
|
||||
q.Add("full_reports", "true") // Default: false
|
||||
q.Add("filters[event.since][][type]", "eq")
|
||||
q.Add("filters[event.since][][value]", sinceFormatted) // seems like inclusively
|
||||
q.Add("filters[event.since][][type]", "eq")
|
||||
q.Add("filters[event.since][][value]", sinceFormatted) // seems like inclusively
|
||||
req.URL.RawQuery = q.Encode()
|
||||
|
||||
authToken := "token " + b.AuthorizationToken
|
||||
|
|
@ -85,7 +83,7 @@ func (b *bugsnag) Request(c *client) error {
|
|||
}
|
||||
sessionID := e.MetaData.SpecialInfo.AsayerSessionId
|
||||
token := e.MetaData.SpecialInfo.OpenReplaySessionToken
|
||||
if sessionID == 0 && token == "" {
|
||||
if sessionID == 0 && token == "" {
|
||||
// c.errChan <- "No AsayerSessionId found. | Message: %v", e
|
||||
continue
|
||||
}
|
||||
|
|
@ -94,16 +92,16 @@ func (b *bugsnag) Request(c *client) error {
|
|||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(parsedTime))
|
||||
timestamp := uint64(parsedTime.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
SessionID: sessionID,
|
||||
Token: token,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "bugsnag",
|
||||
Source: "bugsnag",
|
||||
Timestamp: timestamp,
|
||||
Name: e.Exceptions[0].Message,
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.Exceptions[0].Message,
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
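The bugsnag hunk reads the stored millisecond watermark with time.UnixMilli instead of time.Unix(0, ms*1e6) before formatting it as RFC3339 for the since filter. The conversion in isolation (the watermark value is invented):

package main

import (
	"fmt"
	"time"
)

func main() {
	var sinceTs uint64 = 1609459200000 // invented watermark: 2021-01-01T00:00:00Z
	// Old style: zero seconds plus ms*1e6 nanoseconds.
	before := time.Unix(0, int64(sinceTs)*1e6).UTC().Format(time.RFC3339)
	// New style (Go 1.17+): build the time straight from milliseconds.
	after := time.UnixMilli(int64(sinceTs)).UTC().Format(time.RFC3339)
	fmt.Println(before == after, after) // true 2021-01-01T00:00:00Z
}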
|||
|
|
@ -5,10 +5,10 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/utime"
|
||||
)
|
||||
|
||||
const MAX_ATTEMPTS_IN_A_ROW = 4
|
||||
|
|
@ -20,10 +20,10 @@ type requester interface {
|
|||
}
|
||||
|
||||
type requestData struct {
|
||||
LastMessageTimestamp uint64 // `json:"lastMessageTimestamp, string"`
|
||||
LastMessageId string
|
||||
LastMessageTimestamp uint64 // `json:"lastMessageTimestamp, string"`
|
||||
LastMessageId string
|
||||
UnsuccessfullAttemptsCount int
|
||||
LastAttemptTimestamp int64
|
||||
LastAttemptTimestamp int64
|
||||
}
|
||||
|
||||
type client struct {
|
||||
|
|
@ -31,19 +31,19 @@ type client struct {
|
|||
requester
|
||||
integration *postgres.Integration
|
||||
// TODO: timeout ?
|
||||
mux sync.Mutex
|
||||
mux sync.Mutex
|
||||
updateChan chan<- postgres.Integration
|
||||
evChan chan<- *SessionErrorEvent
|
||||
errChan chan<- error
|
||||
evChan chan<- *SessionErrorEvent
|
||||
errChan chan<- error
|
||||
}
|
||||
|
||||
type SessionErrorEvent struct {
|
||||
SessionID uint64
|
||||
Token string
|
||||
Token string
|
||||
*messages.RawErrorEvent
|
||||
}
|
||||
|
||||
type ClientMap map[ string ]*client
|
||||
type ClientMap map[string]*client
|
||||
|
||||
func NewClient(i *postgres.Integration, updateChan chan<- postgres.Integration, evChan chan<- *SessionErrorEvent, errChan chan<- error) (*client, error) {
|
||||
c := new(client)
|
||||
|
|
@ -60,15 +60,14 @@ func NewClient(i *postgres.Integration, updateChan chan<- postgres.Integration,
|
|||
// TODO: RequestData manager
|
||||
if c.requestData.LastMessageTimestamp == 0 {
|
||||
// ?
|
||||
c.requestData.LastMessageTimestamp = uint64(utime.CurrentTimestamp() - 24*60*60*1000)
|
||||
c.requestData.LastMessageTimestamp = uint64(time.Now().Add(-time.Hour * 24).UnixMilli())
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
|
||||
// from outside
|
||||
func (c* client) Update(i *postgres.Integration) error {
|
||||
func (c *client) Update(i *postgres.Integration) error {
|
||||
c.mux.Lock()
|
||||
defer c.mux.Unlock()
|
||||
var r requester
|
||||
|
|
@ -111,8 +110,8 @@ func (c *client) getLastMessageTimestamp() uint64 {
|
|||
}
|
||||
func (c *client) setLastMessageId(timestamp uint64, id string) {
|
||||
//if timestamp >= c.requestData.LastMessageTimestamp {
|
||||
c.requestData.LastMessageId = id
|
||||
c.requestData.LastMessageTimestamp = timestamp
|
||||
c.requestData.LastMessageId = id
|
||||
c.requestData.LastMessageTimestamp = timestamp
|
||||
//}
|
||||
}
|
||||
func (c *client) getLastMessageId() string {
|
||||
|
|
@ -128,18 +127,18 @@ func (c *client) Request() {
|
|||
c.mux.Lock()
|
||||
defer c.mux.Unlock()
|
||||
if c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS ||
|
||||
(c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS_IN_A_ROW &&
|
||||
utime.CurrentTimestamp() - c.requestData.LastAttemptTimestamp < ATTEMPTS_INTERVAL) {
|
||||
(c.requestData.UnsuccessfullAttemptsCount >= MAX_ATTEMPTS_IN_A_ROW &&
|
||||
time.Now().UnixMilli()-c.requestData.LastAttemptTimestamp < ATTEMPTS_INTERVAL) {
|
||||
return
|
||||
}
|
||||
|
||||
c.requestData.LastAttemptTimestamp = utime.CurrentTimestamp()
|
||||
c.requestData.LastAttemptTimestamp = time.Now().UnixMilli()
|
||||
err := c.requester.Request(c)
|
||||
if err != nil {
|
||||
log.Println("ERRROR L139")
|
||||
log.Println(err)
|
||||
c.handleError(err)
|
||||
c.requestData.UnsuccessfullAttemptsCount++;
|
||||
c.requestData.UnsuccessfullAttemptsCount++
|
||||
} else {
|
||||
c.requestData.UnsuccessfullAttemptsCount = 0
|
||||
}
|
||||
|
|
@ -152,5 +151,3 @@ func (c *client) Request() {
|
|||
c.integration.RequestData = rd
|
||||
c.updateChan <- *c.integration
|
||||
}
|
||||
|
||||
|
||||
|
|
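Request above gates each attempt on two counters: a hard cap (MAX_ATTEMPTS) and, after a few consecutive failures, a cool-down window checked against time.Now().UnixMilli(). A condensed, self-contained version of that gate; the constant values here are invented:

package main

import (
	"errors"
	"fmt"
	"time"
)

// Invented values; the real MAX_ATTEMPTS / ATTEMPTS_INTERVAL live elsewhere in the service.
const (
	maxAttempts        = 10
	maxAttemptsInARow  = 4
	attemptsIntervalMs = 60_000
)

type gate struct {
	failures      int
	lastAttemptMs int64
}

func (g *gate) try(do func() error) {
	now := time.Now().UnixMilli()
	// Skip the call once too many attempts have failed outright,
	// or while inside the cool-down window after a burst of failures.
	if g.failures >= maxAttempts ||
		(g.failures >= maxAttemptsInARow && now-g.lastAttemptMs < attemptsIntervalMs) {
		return
	}
	g.lastAttemptMs = now
	if err := do(); err != nil {
		g.failures++
		return
	}
	g.failures = 0
}

func main() {
	g := &gate{}
	g.try(func() error { return errors.New("boom") })
	fmt.Println(g.failures) // 1
}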
|
|||
|
|
@ -1,38 +1,37 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"encoding/json"
|
||||
"bytes"
|
||||
"time"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
/*
|
||||
We collect Logs. Datadog also has Events
|
||||
|
||||
*/
|
||||
|
||||
type datadog struct {
|
||||
ApplicationKey string //`json:"application_key"`
|
||||
ApiKey string //`json:"api_key"`
|
||||
ApplicationKey string //`json:"application_key"`
|
||||
ApiKey string //`json:"api_key"`
|
||||
}
|
||||
|
||||
type datadogResponce struct {
|
||||
Logs []json.RawMessage
|
||||
Logs []json.RawMessage
|
||||
NextLogId *string
|
||||
Status string
|
||||
Status string
|
||||
}
|
||||
|
||||
type datadogLog struct {
|
||||
Content struct {
|
||||
Timestamp string
|
||||
Message string
|
||||
Timestamp string
|
||||
Message string
|
||||
Attributes struct {
|
||||
Error struct { // Not sure about this
|
||||
Message string
|
||||
|
|
@ -48,10 +47,10 @@ func (d *datadog) makeRequest(nextLogId *string, fromTs uint64, toTs uint64) (*h
|
|||
d.ApplicationKey,
|
||||
)
|
||||
startAt := "null"
|
||||
if nextLogId != nil && *nextLogId != "" {
|
||||
if nextLogId != nil && *nextLogId != "" {
|
||||
startAt = *nextLogId
|
||||
}
|
||||
// Query: status:error/info/warning?
|
||||
// Query: status:error/info/warning?
|
||||
// openReplaySessionToken instead of asayer_session_id
|
||||
jsonBody := fmt.Sprintf(`{
|
||||
"limit": 1000,
|
||||
|
|
@ -72,8 +71,8 @@ func (d *datadog) makeRequest(nextLogId *string, fromTs uint64, toTs uint64) (*h
|
|||
}
|
||||
|
||||
func (d *datadog) Request(c *client) error {
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond
|
||||
toTs := uint64(utime.CurrentTimestamp())
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond
|
||||
toTs := uint64(time.Now().UnixMilli())
|
||||
var nextLogId *string
|
||||
for {
|
||||
req, err := d.makeRequest(nextLogId, fromTs, toTs)
|
||||
|
|
@ -111,16 +110,16 @@ func (d *datadog) Request(c *client) error {
|
|||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(parsedTime))
|
||||
timestamp := uint64(parsedTime.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
//SessionID: sessionID,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "datadog",
|
||||
Source: "datadog",
|
||||
Timestamp: timestamp,
|
||||
Name: ddLog.Content.Attributes.Error.Message,
|
||||
Payload: string(jsonLog),
|
||||
Name: ddLog.Content.Attributes.Error.Message,
|
||||
Payload: string(jsonLog),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -129,4 +128,4 @@ func (d *datadog) Request(c *client) error {
|
|||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ import (
|
|||
"time"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/utime"
|
||||
)
|
||||
|
||||
type elasticsearch struct {
|
||||
|
|
@ -164,7 +163,7 @@ func (es *elasticsearch) Request(c *client) error {
|
|||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(esLog.Time))
|
||||
timestamp := uint64(esLog.Time.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
|
||||
var sessionID uint64
|
||||
|
|
|
|||
|
|
@ -2,25 +2,24 @@ package integration
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
We use insights-api for query. They also have Logs and Events
|
||||
We use insights-api for query. They also have Logs and Events
|
||||
*/
|
||||
|
||||
|
||||
// TODO: Eu/us
|
||||
type newrelic struct {
|
||||
ApplicationId string //`json:"application_id"`
|
||||
XQueryKey string //`json:"x_query_key"`
|
||||
ApplicationId string //`json:"application_id"`
|
||||
XQueryKey string //`json:"x_query_key"`
|
||||
}
|
||||
|
||||
// TODO: Recheck
|
||||
|
|
@ -34,14 +33,14 @@ type newrelicResponce struct {
|
|||
type newrelicEvent struct {
|
||||
//AsayerSessionID uint64 `json:"asayer_session_id,string"` // string/int decoder?
|
||||
OpenReplaySessionToken string `json:"openReplaySessionToken"`
|
||||
ErrorClass string `json:"error.class"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
ErrorClass string `json:"error.class"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
func (nr *newrelic) Request(c *client) error {
|
||||
sinceTs := c.getLastMessageTimestamp() + 1000 // From next second
|
||||
// In docs - format "yyyy-mm-dd HH:MM:ss", but time.RFC3339 works fine too
|
||||
sinceFormatted := time.Unix(0, int64(sinceTs*1e6)).Format(time.RFC3339)
|
||||
sinceFormatted := time.UnixMilli(int64(sinceTs)).Format(time.RFC3339)
|
||||
// US/EU endpoint ??
|
||||
requestURL := fmt.Sprintf("https://insights-api.eu.newrelic.com/v1/accounts/%v/query", nr.ApplicationId)
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
|
|
@ -64,11 +63,10 @@ func (nr *newrelic) Request(c *client) error {
|
|||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
|
||||
// 401 (unauthorised) if wrong XQueryKey/deploymentServer is wrong or 403 (Forbidden) if ApplicationId is wrong
|
||||
// 400 if Query has problems
|
||||
if resp.StatusCode >= 400 {
|
||||
io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket
|
||||
io.Copy(ioutil.Discard, resp.Body) // Read the body to free socket
|
||||
return fmt.Errorf("Newrelic: server respond with the code %v| Request: ", resp.StatusCode, *req)
|
||||
}
|
||||
// Pagination depending on returning metadata ?
|
||||
|
|
@ -92,13 +90,13 @@ func (nr *newrelic) Request(c *client) error {
|
|||
c.evChan <- &SessionErrorEvent{
|
||||
Token: e.OpenReplaySessionToken,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "newrelic",
|
||||
Source: "newrelic",
|
||||
Timestamp: e.Timestamp,
|
||||
Name: e.ErrorClass,
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.ErrorClass,
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,44 +1,41 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"fmt"
|
||||
"time"
|
||||
"strconv"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
/*
|
||||
/*
|
||||
They also have different stuff
|
||||
Documentation says:
|
||||
Documentation says:
|
||||
"Note: This endpoint is experimental and may be removed without notice."
|
||||
*/
|
||||
|
||||
type sentry struct {
|
||||
OrganizationSlug string // `json:"organization_slug"`
|
||||
ProjectSlug string // `json:"project_slug"`
|
||||
Token string // `json:"token"`
|
||||
ProjectSlug string // `json:"project_slug"`
|
||||
Token string // `json:"token"`
|
||||
}
|
||||
|
||||
type sentryEvent struct {
|
||||
Tags []struct {
|
||||
Key string
|
||||
Value string `json:"value"`
|
||||
Key string
|
||||
Value string `json:"value"`
|
||||
}
|
||||
DateCreated string `json:"dateCreated"` // or dateReceived ?
|
||||
Title string
|
||||
EventID string `json:"eventID"`
|
||||
DateCreated string `json:"dateCreated"` // or dateReceived ?
|
||||
Title string
|
||||
EventID string `json:"eventID"`
|
||||
}
|
||||
|
||||
|
||||
func (sn *sentry) Request(c *client) error {
|
||||
requestURL := fmt.Sprintf("https://sentry.io/api/0/projects/%v/%v/events/", sn.OrganizationSlug, sn.ProjectSlug)
|
||||
req, err := http.NewRequest("GET", requestURL, nil)
|
||||
|
|
@ -88,9 +85,9 @@ PageLoop:
|
|||
c.errChan <- fmt.Errorf("%v | Event: %v", err, e)
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(parsedTime))
|
||||
timestamp := uint64(parsedTime.UnixMilli())
|
||||
// TODO: not to receive all the messages (use default integration timestamp)
|
||||
if firstEvent { // TODO: reverse range?
|
||||
if firstEvent { // TODO: reverse range?
|
||||
c.setLastMessageId(timestamp, e.EventID)
|
||||
firstEvent = false
|
||||
}
|
||||
|
|
@ -117,12 +114,12 @@ PageLoop:
|
|||
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
SessionID: sessionID,
|
||||
Token: token,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "sentry",
|
||||
Source: "sentry",
|
||||
Timestamp: timestamp,
|
||||
Name: e.Title,
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.Title,
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
@ -137,7 +134,7 @@ PageLoop:
|
|||
return fmt.Errorf("Link header format error. Got: '%v'", linkHeader)
|
||||
}
|
||||
|
||||
nextLinkInfo := pagInfo[ 1 ]
|
||||
nextLinkInfo := pagInfo[1]
|
||||
if strings.Contains(nextLinkInfo, `results="false"`) {
|
||||
break
|
||||
}
|
||||
|
|
@ -151,4 +148,4 @@ PageLoop:
|
|||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
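Sentry signals pagination through the Link response header, and the loop above stops once the second entry advertises results="false". A rough sketch of that cursor extraction; the parsing is deliberately simplified and the attribute layout is an assumption based on the checks above:

package main

import (
	"fmt"
	"strings"
)

// nextCursor picks the "next" half of a Sentry-style Link header and
// reports whether more pages remain.
func nextCursor(linkHeader string) (string, bool) {
	parts := strings.Split(linkHeader, ",")
	if len(parts) < 2 {
		return "", false
	}
	next := parts[1]
	if strings.Contains(next, `results="false"`) {
		return "", false
	}
	// The cursor value sits in a cursor="..." attribute.
	if i := strings.Index(next, `cursor="`); i >= 0 {
		rest := next[i+len(`cursor="`):]
		if j := strings.Index(rest, `"`); j >= 0 {
			return rest[:j], true
		}
	}
	return "", false
}

func main() {
	h := `<...>; rel="previous"; results="false", <...>; rel="next"; results="true"; cursor="0:100:0"`
	fmt.Println(nextCursor(h)) // 0:100:0 true
}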
|
|||
|
|
@ -1,22 +1,19 @@
|
|||
package integration
|
||||
|
||||
|
||||
import (
|
||||
"google.golang.org/api/option"
|
||||
"cloud.google.com/go/logging/logadmin"
|
||||
"google.golang.org/api/iterator"
|
||||
|
||||
//"strconv"
|
||||
"encoding/json"
|
||||
"time"
|
||||
"fmt"
|
||||
"context"
|
||||
"google.golang.org/api/option"
|
||||
|
||||
//"strconv"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
// Old: asayerSessionId
|
||||
|
||||
const SD_FILTER_QUERY = `
|
||||
|
|
@ -28,7 +25,7 @@ const SD_FILTER_QUERY = `
|
|||
|
||||
type stackdriver struct {
|
||||
ServiceAccountCredentials string // `json:"service_account_credentials"`
|
||||
LogName string // `json:"log_name"`
|
||||
LogName string // `json:"log_name"`
|
||||
}
|
||||
|
||||
type saCreds struct {
|
||||
|
|
@ -37,10 +34,10 @@ type saCreds struct {
|
|||
|
||||
func (sd *stackdriver) Request(c *client) error {
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // Timestamp is RFC3339Nano, so we take the next millisecond
|
||||
fromFormatted := time.Unix(0, int64(fromTs *1e6)).Format(time.RFC3339Nano)
|
||||
fromFormatted := time.UnixMilli(int64(fromTs)).Format(time.RFC3339Nano)
|
||||
ctx := context.Background()
|
||||
|
||||
var parsedCreds saCreds
|
||||
var parsedCreds saCreds
|
||||
err := json.Unmarshal([]byte(sd.ServiceAccountCredentials), &parsedCreds)
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -49,56 +46,56 @@ func (sd *stackdriver) Request(c *client) error {
|
|||
opt := option.WithCredentialsJSON([]byte(sd.ServiceAccountCredentials))
|
||||
client, err := logadmin.NewClient(ctx, parsedCreds.ProjectId, opt)
|
||||
if err != nil {
|
||||
return err
|
||||
return err
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
filter := fmt.Sprintf(SD_FILTER_QUERY, parsedCreds.ProjectId, sd.LogName, fromFormatted)
|
||||
// By default, Entries are listed from oldest to newest.
|
||||
/* ResourceNames(rns []string)
|
||||
"projects/[PROJECT_ID]"
|
||||
"organizations/[ORGANIZATION_ID]"
|
||||
"billingAccounts/[BILLING_ACCOUNT_ID]"
|
||||
"folders/[FOLDER_ID]"
|
||||
*/
|
||||
it := client.Entries(ctx, logadmin.Filter(filter))
|
||||
|
||||
// TODO: Pagination:
|
||||
//pager := iterator.NewPager(it, 1000, "")
|
||||
//nextToken, err := pager.NextPage(&entries)
|
||||
//if nextToken == "" { break }
|
||||
for {
|
||||
e, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
filter := fmt.Sprintf(SD_FILTER_QUERY, parsedCreds.ProjectId, sd.LogName, fromFormatted)
|
||||
// By default, Entries are listed from oldest to newest.
|
||||
/* ResourceNames(rns []string)
|
||||
"projects/[PROJECT_ID]"
|
||||
"organizations/[ORGANIZATION_ID]"
|
||||
"billingAccounts/[BILLING_ACCOUNT_ID]"
|
||||
"folders/[FOLDER_ID]"
|
||||
*/
|
||||
it := client.Entries(ctx, logadmin.Filter(filter))
|
||||
|
||||
token := e.Labels["openReplaySessionToken"]
|
||||
// sessionID, err := strconv.ParseUint(strSessionID, 10, 64)
|
||||
// if err != nil {
|
||||
// c.errChan <- err
|
||||
// continue
|
||||
// }
|
||||
jsonEvent, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(utime.ToMilliseconds(e.Timestamp))
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
// TODO: Pagination:
|
||||
//pager := iterator.NewPager(it, 1000, "")
|
||||
//nextToken, err := pager.NextPage(&entries)
|
||||
//if nextToken == "" { break }
|
||||
for {
|
||||
e, err := it.Next()
|
||||
if err == iterator.Done {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
token := e.Labels["openReplaySessionToken"]
|
||||
// sessionID, err := strconv.ParseUint(strSessionID, 10, 64)
|
||||
// if err != nil {
|
||||
// c.errChan <- err
|
||||
// continue
|
||||
// }
|
||||
jsonEvent, err := json.Marshal(e)
|
||||
if err != nil {
|
||||
c.errChan <- err
|
||||
continue
|
||||
}
|
||||
timestamp := uint64(e.Timestamp.UnixMilli())
|
||||
c.setLastMessageTimestamp(timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
//SessionID: sessionID,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "stackdriver",
|
||||
Source: "stackdriver",
|
||||
Timestamp: timestamp,
|
||||
Name: e.InsertID, // not sure about that
|
||||
Payload: string(jsonEvent),
|
||||
Name: e.InsertID, // not sure about that
|
||||
Payload: string(jsonEvent),
|
||||
},
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
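The stackdriver integration drains logadmin.Entries with the usual Google API iterator protocol: call Next until it returns iterator.Done. Stripped of the OpenReplay specifics, the pattern looks roughly like this; the project ID and filter are placeholders, and credentials are assumed to come from the environment:

package main

import (
	"context"
	"log"

	"cloud.google.com/go/logging/logadmin"
	"google.golang.org/api/iterator"
)

func main() {
	ctx := context.Background()
	// "my-project" is a placeholder project ID.
	client, err := logadmin.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Entries are listed oldest to newest by default.
	it := client.Entries(ctx, logadmin.Filter(`severity>=ERROR`))
	for {
		e, err := it.Next()
		if err == iterator.Done {
			break // no more entries
		}
		if err != nil {
			log.Fatal(err)
		}
		log.Println(e.InsertID)
	}
}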
|
|||
|
|
@ -1,20 +1,19 @@
|
|||
package integration
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/utime"
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
The maximum value for limit is 10,000 messages or 100 MB in total message size,
|
||||
/*
|
||||
The maximum value for limit is 10,000 messages or 100 MB in total message size,
|
||||
which means the query may return less than 10,000 messages if you exceed the size limit.
|
||||
|
||||
API Documentation: https://help.sumologic.com/APIs/Search-Job-API/About-the-Search-Job-API
|
||||
|
|
@ -22,31 +21,30 @@ import (
|
|||
const SL_LIMIT = 10000
|
||||
|
||||
type sumologic struct {
|
||||
AccessId string // `json:"access_id"`
|
||||
AccessKey string // `json:"access_key"`
|
||||
cookies []*http.Cookie
|
||||
AccessId string // `json:"access_id"`
|
||||
AccessKey string // `json:"access_key"`
|
||||
cookies []*http.Cookie
|
||||
}
|
||||
|
||||
|
||||
type sumplogicJobResponce struct {
|
||||
Id string
|
||||
}
|
||||
|
||||
type sumologicJobStatusResponce struct {
|
||||
State string
|
||||
State string
|
||||
MessageCount int
|
||||
//PendingErrors []string
|
||||
}
|
||||
|
||||
type sumologicResponce struct {
|
||||
Messages [] struct {
|
||||
Messages []struct {
|
||||
Map json.RawMessage
|
||||
}
|
||||
}
|
||||
|
||||
type sumologicEvent struct {
|
||||
Timestamp uint64 `json:"_messagetime,string"`
|
||||
Raw string `json:"_raw"`
|
||||
Raw string `json:"_raw"`
|
||||
}
|
||||
|
||||
func (sl *sumologic) deleteJob(jobId string, errChan chan<- error) {
|
||||
|
|
@ -68,10 +66,9 @@ func (sl *sumologic) deleteJob(jobId string, errChan chan<- error) {
|
|||
resp.Body.Close()
|
||||
}
|
||||
|
||||
|
||||
func (sl *sumologic) Request(c *client) error {
|
||||
fromTs := c.getLastMessageTimestamp() + 1 // From next millisecond
|
||||
toTs := utime.CurrentTimestamp()
|
||||
toTs := time.Now().UnixMilli()
|
||||
requestURL := fmt.Sprintf("https://api.%vsumologic.com/api/v1/search/jobs", "eu.") // deployment server??
|
||||
jsonBody := fmt.Sprintf(`{
|
||||
"query": "\"openReplaySessionToken=\" AND (*error* OR *fail* OR *exception*)",
|
||||
|
|
@ -132,7 +129,7 @@ func (sl *sumologic) Request(c *client) error {
|
|||
|
||||
tick := time.Tick(5 * time.Second)
|
||||
for {
|
||||
<- tick
|
||||
<-tick
|
||||
resp, err = http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err // TODO: retry, counter/timeout
|
||||
|
|
@ -147,12 +144,12 @@ func (sl *sumologic) Request(c *client) error {
|
|||
}
|
||||
if jobStatus.State == "DONE GATHERING RESULTS" {
|
||||
offset := 0
|
||||
for ;offset < jobStatus.MessageCount; {
|
||||
for offset < jobStatus.MessageCount {
|
||||
requestURL = fmt.Sprintf(
|
||||
"https://api.%vsumologic.com/api/v1/search/jobs/%v/messages?offset=%v&limit=%v",
|
||||
"eu.",
|
||||
jobResponce.Id,
|
||||
offset,
|
||||
"https://api.%vsumologic.com/api/v1/search/jobs/%v/messages?offset=%v&limit=%v",
|
||||
"eu.",
|
||||
jobResponce.Id,
|
||||
offset,
|
||||
SL_LIMIT,
|
||||
)
|
||||
req, err = http.NewRequest("GET", requestURL, nil)
|
||||
|
|
@ -190,17 +187,17 @@ func (sl *sumologic) Request(c *client) error {
|
|||
}
|
||||
name := e.Raw
|
||||
if len(name) > 20 {
|
||||
name = name[:20] // not sure about that
|
||||
name = name[:20] // not sure about that
|
||||
}
|
||||
c.setLastMessageTimestamp(e.Timestamp)
|
||||
c.evChan <- &SessionErrorEvent{
|
||||
//SessionID: sessionID,
|
||||
Token: token,
|
||||
RawErrorEvent: &messages.RawErrorEvent{
|
||||
Source: "sumologic",
|
||||
Source: "sumologic",
|
||||
Timestamp: e.Timestamp,
|
||||
Name: name,
|
||||
Payload: string(m.Map), //e.Raw ?
|
||||
Name: name,
|
||||
Payload: string(m.Map), //e.Raw ?
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -209,11 +206,11 @@ func (sl *sumologic) Request(c *client) error {
|
|||
}
|
||||
break
|
||||
}
|
||||
if jobStatus.State != "NOT STARTED" &&
|
||||
if jobStatus.State != "NOT STARTED" &&
|
||||
jobStatus.State != "GATHERING RESULTS" {
|
||||
// error
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
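The sumologic search job above is polled on a five-second tick until its state settles, then paged through by advancing offset in SL_LIMIT steps until MessageCount is exhausted. A bare-bones skeleton of that poll-then-page flow; the state strings match the diff, everything else is a stand-in:

package main

import (
	"fmt"
	"time"
)

const pageLimit = 100 // stand-in for SL_LIMIT

// pollThenPage waits for status() to report completion, then pages through count items.
func pollThenPage(status func() (string, int), fetch func(offset, limit int)) {
	tick := time.Tick(5 * time.Second)
	for {
		<-tick
		state, count := status()
		if state == "DONE GATHERING RESULTS" {
			for offset := 0; offset < count; offset += pageLimit {
				fetch(offset, pageLimit)
			}
			return
		}
		if state != "NOT STARTED" && state != "GATHERING RESULTS" {
			return // terminal error state
		}
	}
}

func main() {
	pollThenPage(
		func() (string, int) { return "DONE GATHERING RESULTS", 250 },
		func(offset, limit int) { fmt.Println("fetch", offset, limit) },
	)
}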
|
|||
|
|
@ -1,8 +1,8 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
|
|
@ -10,67 +10,64 @@ import (
|
|||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
FS_DIR := env.String("FS_DIR");
|
||||
FS_DIR := env.String("FS_DIR")
|
||||
if _, err := os.Stat(FS_DIR); os.IsNotExist(err) {
|
||||
log.Fatalf("%v doesn't exist. %v", FS_DIR, err)
|
||||
}
|
||||
|
||||
writer := NewWriter(env.Uint16("FS_ULIMIT"), FS_DIR)
|
||||
|
||||
count := 0
|
||||
count := 0
|
||||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
env.String("GROUP_SINK"),
|
||||
[]string{
|
||||
[]string{
|
||||
env.String("TOPIC_RAW_WEB"),
|
||||
env.String("TOPIC_RAW_IOS"),
|
||||
},
|
||||
func(sessionID uint64, message Message, _ *types.Meta) {
|
||||
//typeID, err := GetMessageTypeID(value)
|
||||
// if err != nil {
|
||||
// log.Printf("Message type decoding error: %v", err)
|
||||
// return
|
||||
// }
|
||||
typeID := message.Meta().TypeID
|
||||
if !IsReplayerType(typeID) {
|
||||
return
|
||||
}
|
||||
},
|
||||
func(sessionID uint64, message Message, _ *types.Meta) {
|
||||
//typeID, err := GetMessageTypeID(value)
|
||||
// if err != nil {
|
||||
// log.Printf("Message type decoding error: %v", err)
|
||||
// return
|
||||
// }
|
||||
typeID := message.Meta().TypeID
|
||||
if !IsReplayerType(typeID) {
|
||||
return
|
||||
}
|
||||
|
||||
count++
|
||||
count++
|
||||
|
||||
value := message.Encode()
|
||||
var data []byte
|
||||
if IsIOSType(typeID) {
|
||||
data = value
|
||||
} else {
|
||||
value := message.Encode()
|
||||
var data []byte
|
||||
if IsIOSType(typeID) {
|
||||
data = value
|
||||
} else {
|
||||
data = make([]byte, len(value)+8)
|
||||
copy(data[8:], value[:])
|
||||
binary.LittleEndian.PutUint64(data[0:], message.Meta().Index)
|
||||
}
|
||||
if err := writer.Write(sessionID, data); err != nil {
|
||||
}
|
||||
if err := writer.Write(sessionID, data); err != nil {
|
||||
log.Printf("Writer error: %v\n", err)
|
||||
}
|
||||
},
|
||||
},
|
||||
false,
|
||||
)
|
||||
consumer.DisableAutoCommit()
|
||||
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
tick := time.Tick(30 * time.Second)
|
||||
tick := time.Tick(30 * time.Second)
|
||||
|
||||
log.Printf("Sink service started\n")
|
||||
log.Printf("Sink service started\n")
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
|
|
@ -85,7 +82,7 @@ func main() {
|
|||
|
||||
log.Printf("%v messages during 30 sec", count)
|
||||
count = 0
|
||||
|
||||
|
||||
consumer.Commit()
|
||||
default:
|
||||
err := consumer.ConsumeNext()
|
||||
|
|
@ -96,4 +93,3 @@ func main() {
|
|||
}
|
||||
|
||||
}
|
||||
|
||||
|
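In the sink hunk above, web messages are framed with the 8-byte little-endian message index in front of the encoded payload (iOS payloads are written raw). The framing on its own:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the message index as 8 little-endian bytes,
// mirroring the layout the sink writes to disk.
func frame(index uint64, value []byte) []byte {
	data := make([]byte, len(value)+8)
	copy(data[8:], value)
	binary.LittleEndian.PutUint64(data[0:], index)
	return data
}

func main() {
	out := frame(7, []byte("payload"))
	fmt.Println(binary.LittleEndian.Uint64(out[:8])) // 7
	fmt.Println(string(out[8:]))                     // payload
}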
|
|
|||
|
|
@ -2,45 +2,41 @@ package main
|
|||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/storage"
|
||||
)
|
||||
|
||||
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
|
||||
storageWeb := storage.NewS3(env.String("AWS_REGION_WEB"), env.String("S3_BUCKET_WEB"))
|
||||
//storageIos := storage.NewS3(env.String("AWS_REGION_IOS"), env.String("S3_BUCKET_IOS"))
|
||||
storage := storage.NewS3(env.String("AWS_REGION_WEB"), env.String("S3_BUCKET_WEB"))
|
||||
FS_DIR := env.String("FS_DIR")
|
||||
FS_CLEAN_HRS := env.Int("FS_CLEAN_HRS")
|
||||
|
||||
var uploadKey func(string, int, *storage.S3)
|
||||
uploadKey = func(key string, retryCount int, s *storage.S3) {
|
||||
var uploadKey func(string, int)
|
||||
uploadKey = func(key string, retryCount int) {
|
||||
if retryCount <= 0 {
|
||||
return;
|
||||
return
|
||||
}
|
||||
file, err := os.Open(FS_DIR + "/" + key)
|
||||
defer file.Close()
|
||||
if err != nil {
|
||||
log.Printf("File error: %v; Will retry %v more time(s)\n", err, retryCount)
|
||||
time.AfterFunc(2*time.Minute, func() {
|
||||
uploadKey(key, retryCount - 1, s)
|
||||
uploadKey(key, retryCount-1)
|
||||
})
|
||||
} else {
|
||||
if err := s.Upload(gzipFile(file), key, "application/octet-stream", true); err != nil {
|
||||
if err := storage.Upload(gzipFile(file), key, "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage upload error: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -48,27 +44,24 @@ func main() {
|
|||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
env.String("GROUP_STORAGE"),
|
||||
[]string{
|
||||
[]string{
|
||||
env.String("TOPIC_TRIGGER"),
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
switch msg.(type) {
|
||||
case *messages.SessionEnd:
|
||||
uploadKey(strconv.FormatUint(sessionID, 10), 5, storageWeb)
|
||||
//case *messages.IOSSessionEnd:
|
||||
// uploadKey(strconv.FormatUint(sessionID, 10), 5, storageIos)
|
||||
}
|
||||
},
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
switch msg.(type) {
|
||||
case *messages.SessionEnd:
|
||||
uploadKey(strconv.FormatUint(sessionID, 10), 5)
|
||||
}
|
||||
},
|
||||
true,
|
||||
)
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
cleanTick := time.Tick(time.Duration(FS_CLEAN_HRS) * time.Hour)
|
||||
|
||||
cleanTick := time.Tick(time.Duration(FS_CLEAN_HRS) * time.Hour)
|
||||
|
||||
|
||||
log.Printf("Storage service started\n")
|
||||
log.Printf("Storage service started\n")
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
|
|
@ -85,4 +78,3 @@ func main() {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
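The storage service retries a failed upload by rescheduling itself with time.AfterFunc two minutes later, spending one unit of retry budget per attempt. The same shape detached from S3; the work function is a stand-in:

package main

import (
	"errors"
	"log"
	"time"
)

// retryAfter runs work and, on failure, schedules another attempt
// two minutes later until the retry budget is spent.
func retryAfter(key string, retryCount int, work func(string) error) {
	if retryCount <= 0 {
		return
	}
	if err := work(key); err != nil {
		log.Printf("work error: %v; will retry %v more time(s)", err, retryCount)
		time.AfterFunc(2*time.Minute, func() {
			retryAfter(key, retryCount-1, work)
		})
	}
}

func main() {
	retryAfter("session-123", 5, func(string) error { return errors.New("transient") })
	time.Sleep(50 * time.Millisecond) // let the first attempt log before this demo exits
}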
|
|
|||
|
|
@ -6,41 +6,40 @@ from chalicelib.core import projects
|
|||
|
||||
|
||||
def get_state(tenant_id):
|
||||
my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
|
||||
pids = [s["projectId"] for s in my_projects]
|
||||
pids = projects.get_projects_ids(tenant_id=tenant_id)
|
||||
with pg_client.PostgresClient() as cur:
|
||||
recorded = False
|
||||
meta = False
|
||||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
SELECT
|
||||
COUNT(*)
|
||||
FROM public.sessions AS s
|
||||
where s.project_id IN %(ids)s
|
||||
LIMIT 1;""",
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)})
|
||||
)
|
||||
recorded = cur.fetchone()["count"] > 0
|
||||
recorded = cur.fetchone()["exists"]
|
||||
meta = False
|
||||
if recorded:
|
||||
cur.execute(
|
||||
cur.mogrify("""SELECT SUM((SELECT COUNT(t.meta)
|
||||
FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
|
||||
(p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
|
||||
(sessions.user_id)) AS t(meta)
|
||||
WHERE t.meta NOTNULL))
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 'defined'
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON(TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s
|
||||
AND p.deleted_at ISNULL;"""
|
||||
cur.mogrify("""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id = p.project_id
|
||||
AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s AND p.deleted_at ISNULL
|
||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;"""
|
||||
, {"tenant_id": tenant_id}))
|
||||
|
||||
meta = cur.fetchone()["sum"] > 0
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return [
|
||||
{"task": "Install OpenReplay",
|
||||
|
|
@ -61,22 +60,18 @@ def get_state(tenant_id):
|
|||
|
||||
|
||||
def get_state_installing(tenant_id):
|
||||
my_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False)
|
||||
pids = [s["projectId"] for s in my_projects]
|
||||
pids = projects.get_projects_ids(tenant_id=tenant_id)
|
||||
with pg_client.PostgresClient() as cur:
|
||||
recorded = False
|
||||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
SELECT
|
||||
COUNT(*)
|
||||
FROM public.sessions AS s
|
||||
where s.project_id IN %(ids)s
|
||||
LIMIT 1;""",
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)})
|
||||
)
|
||||
recorded = cur.fetchone()["count"] > 0
|
||||
recorded = cur.fetchone()["exists"]
|
||||
|
||||
return {"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
|
|
@ -86,21 +81,24 @@ def get_state_installing(tenant_id):
|
|||
def get_state_identify_users(tenant_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""SELECT SUM((SELECT COUNT(t.meta)
|
||||
FROM (VALUES (p.metadata_1), (p.metadata_2), (p.metadata_3), (p.metadata_4), (p.metadata_5),
|
||||
(p.metadata_6), (p.metadata_7), (p.metadata_8), (p.metadata_9), (p.metadata_10),
|
||||
(sessions.user_id)) AS t(meta)
|
||||
WHERE t.meta NOTNULL))
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 'defined'
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id=p.project_id AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON(TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s
|
||||
AND p.deleted_at ISNULL;"""
|
||||
cur.mogrify("""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE sessions.project_id = p.project_id
|
||||
AND sessions.user_id IS NOT NULL
|
||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||
WHERE p.tenant_id = %(tenant_id)s AND p.deleted_at ISNULL
|
||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;"""
|
||||
, {"tenant_id": tenant_id}))
|
||||
|
||||
meta = cur.fetchone()["sum"] > 0
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return {"task": "Identify Users",
|
||||
"done": meta,
|
||||
|
|
|
|||
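These Python hunks replace the COUNT(*) ... LIMIT 1 probes with SELECT EXISTS(...), which lets Postgres stop at the first matching row instead of scanning to produce a count. For comparison, the same probe issued from Go via database/sql; the driver, DSN, and project id are assumptions for the sketch:

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // assumed Postgres driver for the sketch
)

func main() {
	db, err := sql.Open("postgres", "postgres://localhost/openreplay?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var recorded bool
	// EXISTS short-circuits on the first matching session row.
	err = db.QueryRow(
		`SELECT EXISTS(SELECT 1 FROM public.sessions WHERE project_id = $1)`,
		int64(1), // illustrative project id
	).Scan(&recorded)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(recorded)
}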
|
|
@ -324,7 +324,7 @@ def is_authorized_batch(project_ids, tenant_id):
|
|||
query = cur.mogrify("""\
|
||||
SELECT project_id
|
||||
FROM public.projects
|
||||
where tenant_id =%(tenant_id)s
|
||||
WHERE tenant_id =%(tenant_id)s
|
||||
AND project_id IN %(project_ids)s
|
||||
AND deleted_at IS NULL;""",
|
||||
{"tenant_id": tenant_id, "project_ids": tuple(project_ids)})
|
||||
|
|
@ -334,3 +334,13 @@ def is_authorized_batch(project_ids, tenant_id):
|
|||
)
|
||||
rows = cur.fetchall()
|
||||
return [r["project_id"] for r in rows]
|
||||
|
||||
|
||||
def get_projects_ids(tenant_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify("""SELECT s.project_id
|
||||
FROM public.projects AS s
|
||||
WHERE tenant_id =%(tenant_id)s AND s.deleted_at IS NULL
|
||||
ORDER BY s.project_id;""", {"tenant_id": tenant_id}))
|
||||
rows = cur.fetchall()
|
||||
return [r["project_id"] for r in rows]
|
||||
|
|
|
|||
|
|
@ -53,7 +53,7 @@ def compute():
|
|||
RETURNING *,(SELECT email FROM users_ee WHERE role = 'owner' AND users_ee.tenant_id = tenants.tenant_id LIMIT 1);"""
|
||||
)
|
||||
data = cur.fetchall()
|
||||
requests.post('https://parrot.asayer.io/os/telemetry',
|
||||
requests.post('https://api.openreplay.com/os/telemetry',
|
||||
json={"stats": [process_data(d, edition='ee') for d in data]})
|
||||
|
||||
|
||||
|
|
@ -65,4 +65,4 @@ def new_client(tenant_id):
|
|||
FROM public.tenants
|
||||
WHERE tenant_id=%(tenant_id)s;""", {"tenant_id": tenant_id}))
|
||||
data = cur.fetchone()
|
||||
requests.post('https://parrot.asayer.io/os/signup', json=process_data(data, edition='ee'))
|
||||
requests.post('https://api.openreplay.com/os/signup', json=process_data(data, edition='ee'))
|
||||
|
|
@ -9,9 +9,9 @@ import (
|
|||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"
|
||||
)
|
||||
|
||||
type Message = kafka.Message
|
||||
|
|
@ -19,13 +19,18 @@ type Message = kafka.Message
|
|||
type Consumer struct {
|
||||
c *kafka.Consumer
|
||||
messageHandler types.MessageHandler
|
||||
commitTicker *time.Ticker
|
||||
commitTicker *time.Ticker
|
||||
pollTimeout uint
|
||||
|
||||
lastKafkaEventTs int64
|
||||
}
|
||||
|
||||
func NewConsumer(group string, topics []string, messageHandler types.MessageHandler) *Consumer {
|
||||
func NewConsumer(
|
||||
group string,
|
||||
topics []string,
|
||||
messageHandler types.MessageHandler,
|
||||
autoCommit bool,
|
||||
) *Consumer {
|
||||
protocol := "plaintext"
|
||||
if env.Bool("KAFKA_USE_SSL") {
|
||||
protocol = "ssl"
|
||||
|
|
@ -53,25 +58,25 @@ func NewConsumer(group string, topics []string, messageHandler types.MessageHand
|
|||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
var commitTicker *time.Ticker
|
||||
if autoCommit {
|
||||
commitTicker = time.NewTicker(2 * time.Minute)
|
||||
}
|
||||
|
||||
return &Consumer{
|
||||
c: c,
|
||||
messageHandler: messageHandler,
|
||||
commitTicker: time.NewTicker(2 * time.Minute),
|
||||
commitTicker: commitTicker,
|
||||
pollTimeout: 200,
|
||||
}
|
||||
}
|
||||
|
||||
func (consumer *Consumer) DisableAutoCommit() {
|
||||
consumer.commitTicker.Stop()
|
||||
}
|
||||
|
||||
|
||||
func (consumer *Consumer) Commit() error {
|
||||
consumer.c.Commit() // TODO: return error if it is not "No offset stored"
|
||||
return nil
|
||||
}
|
||||
|
||||
func (consumer *Consumer) CommitAtTimestamp(commitTs int64) error {
|
||||
func (consumer *Consumer) CommitAtTimestamp(commitTs int64) error {
|
||||
assigned, err := consumer.c.Assignment()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -84,37 +89,38 @@ func (consumer *Consumer) CommitAtTimestamp(commitTs int64) error {
|
|||
timestamps = append(timestamps, p)
|
||||
}
|
||||
offsets, err := consumer.c.OffsetsForTimes(timestamps, 2000)
|
||||
if err != nil {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Kafka Consumer back commit error")
|
||||
}
|
||||
|
||||
// Limiting to already committed
|
||||
committed, err := consumer.c.Committed(assigned, 2000) // memorise?
|
||||
logPartitions("Actually committed:",committed)
|
||||
logPartitions("Actually committed:", committed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Kafka Consumer retrieving committed error")
|
||||
}
|
||||
for _, offs := range offsets {
|
||||
for _, comm := range committed {
|
||||
if comm.Offset == kafka.OffsetStored ||
|
||||
if comm.Offset == kafka.OffsetStored ||
|
||||
comm.Offset == kafka.OffsetInvalid ||
|
||||
comm.Offset == kafka.OffsetBeginning ||
|
||||
comm.Offset == kafka.OffsetEnd { continue }
|
||||
if comm.Partition == offs.Partition &&
|
||||
comm.Offset == kafka.OffsetBeginning ||
|
||||
comm.Offset == kafka.OffsetEnd {
|
||||
continue
|
||||
}
|
||||
if comm.Partition == offs.Partition &&
|
||||
(comm.Topic != nil && offs.Topic != nil && *comm.Topic == *offs.Topic) &&
|
||||
comm.Offset > offs.Offset {
|
||||
comm.Offset > offs.Offset {
|
||||
offs.Offset = comm.Offset
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: check per-partition errors: offsets[i].Error
|
||||
// TODO: check per-partition errors: offsets[i].Error
|
||||
_, err = consumer.c.CommitOffsets(offsets)
|
||||
return errors.Wrap(err, "Kafka Consumer back commit error")
|
||||
}
|
||||
|
||||
|
||||
func (consumer *Consumer) CommitBack(gap int64) error {
|
||||
func (consumer *Consumer) CommitBack(gap int64) error {
|
||||
if consumer.lastKafkaEventTs == 0 {
|
||||
return nil
|
||||
}
|
||||
|
|
@ -128,38 +134,40 @@ func (consumer *Consumer) ConsumeNext() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-consumer.commitTicker.C:
|
||||
consumer.Commit()
|
||||
default:
|
||||
if consumer.commitTicker != nil {
|
||||
select {
|
||||
case <-consumer.commitTicker.C:
|
||||
consumer.Commit()
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
switch e := ev.(type) {
|
||||
case *kafka.Message:
|
||||
if e.TopicPartition.Error != nil {
|
||||
return errors.Wrap(e.TopicPartition.Error, "Consumer Partition Error")
|
||||
}
|
||||
ts := e.Timestamp.UnixNano()/ 1e6
|
||||
consumer.messageHandler(decodeKey(e.Key), e.Value, &types.Meta{
|
||||
Topic: *(e.TopicPartition.Topic),
|
||||
ID: uint64(e.TopicPartition.Offset),
|
||||
Timestamp: ts,
|
||||
})
|
||||
consumer.lastKafkaEventTs = ts
|
||||
// case kafka.AssignedPartitions:
|
||||
// logPartitions("Kafka Consumer: Partitions Assigned", e.Partitions)
|
||||
// consumer.partitions = e.Partitions
|
||||
// consumer.c.Assign(e.Partitions)
|
||||
// log.Printf("Actually partitions assigned!")
|
||||
// case kafka.RevokedPartitions:
|
||||
// log.Println("Kafka Cosumer: Partitions Revoked")
|
||||
// consumer.partitions = nil
|
||||
// consumer.c.Unassign()
|
||||
case kafka.Error:
|
||||
if e.Code() == kafka.ErrAllBrokersDown {
|
||||
os.Exit(1)
|
||||
}
|
||||
log.Printf("Consumer error: %v\n", e)
|
||||
case *kafka.Message:
|
||||
if e.TopicPartition.Error != nil {
|
||||
return errors.Wrap(e.TopicPartition.Error, "Consumer Partition Error")
|
||||
}
|
||||
ts := e.Timestamp.UnixMilli()
|
||||
consumer.messageHandler(decodeKey(e.Key), e.Value, &types.Meta{
|
||||
Topic: *(e.TopicPartition.Topic),
|
||||
ID: uint64(e.TopicPartition.Offset),
|
||||
Timestamp: ts,
|
||||
})
|
||||
consumer.lastKafkaEventTs = ts
|
||||
// case kafka.AssignedPartitions:
|
||||
// logPartitions("Kafka Consumer: Partitions Assigned", e.Partitions)
|
||||
// consumer.partitions = e.Partitions
|
||||
// consumer.c.Assign(e.Partitions)
|
||||
// log.Printf("Actually partitions assigned!")
|
||||
// case kafka.RevokedPartitions:
|
||||
// log.Println("Kafka Cosumer: Partitions Revoked")
|
||||
// consumer.partitions = nil
|
||||
// consumer.c.Unassign()
|
||||
case kafka.Error:
|
||||
if e.Code() == kafka.ErrAllBrokersDown || e.Code() == kafka.ErrMaxPollExceeded {
|
||||
os.Exit(1)
|
||||
}
|
||||
log.Printf("Consumer error: %v\n", e)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -173,8 +181,6 @@ func (consumer *Consumer) Close() {
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
// func (consumer *Consumer) consume(
|
||||
// message func(m *kafka.Message) error,
|
||||
// commit func(c *kafka.Consumer) error,
|
||||
|
|
@ -230,7 +236,6 @@ func (consumer *Consumer) Close() {
// 	}
// }


// func (consumer *Consumer) Consume(
// 	message func(key uint64, value []byte) error,
// ) error {

@ -33,7 +33,7 @@ func CheckLicense() {
		log.Fatal("Can not form a license check request.")
	}

	resp, err := http.Post("https://parrot.asayer.io/os/license", "application/json", bytes.NewReader(requestBody))
	resp, err := http.Post("https://api.openreplay.com/os/license", "application/json", bytes.NewReader(requestBody))
	if err != nil {
		log.Fatalf("Error while checking license. %v", err)
	}

@ -2,17 +2,16 @@ package queue

import (
	"openreplay/backend/pkg/kafka"
	"openreplay/backend/pkg/queue/types"
	"openreplay/backend/pkg/license"
	"openreplay/backend/pkg/queue/types"
)

func NewConsumer(group string, topics []string, handler types.MessageHandler) types.Consumer {
func NewConsumer(group string, topics []string, handler types.MessageHandler, autoCommit bool) types.Consumer {
	license.CheckLicense()
	return kafka.NewConsumer(group, topics, handler)
	return kafka.NewConsumer(group, topics, handler, autoCommit)
}

func NewProducer() types.Producer {
	license.CheckLicense()
	return kafka.NewProducer()
}

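The new `autoCommit` flag is simply threaded through to the kafka package. A hypothetical call site for the widened signature (the group name, topic, and handler body are illustrative, and the handler's parameter list is inferred from the `messageHandler` invocation shown earlier in this diff):

```go
// Hypothetical usage sketch; names and the handler signature are assumptions.
consumer := queue.NewConsumer(
	"db-batch",               // consumer group
	[]string{"raw-messages"}, // topics to subscribe to
	func(key uint64, value []byte, meta *types.Meta) {
		// process one message
	},
	false, // autoCommit: disabled here, so offsets are committed explicitly via Commit()
)
```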
@ -26,7 +26,7 @@ which docker &> /dev/null || {
}


# https://parrot.asayer.io/os/license
# https://api.openreplay.com/os/license
# payload: {"mid": "UUID of the machine", "license": ""}
# response {"data":{"valid": TRUE|FALSE, "expiration": expiration date in ms}}

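The comment above documents the license endpoint's contract. For illustration, a minimal Go sketch of that exchange (field names are taken from the comment; everything else is an assumption, not part of this commit):

```go
package main

import (
	"bytes"
	"encoding/json"
	"log"
	"net/http"
)

func main() {
	// Payload shape from the comment: {"mid": "...", "license": ""}
	body, _ := json.Marshal(map[string]string{"mid": "UUID of the machine", "license": ""})
	resp, err := http.Post("https://api.openreplay.com/os/license", "application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatalf("license check failed: %v", err)
	}
	defer resp.Body.Close()

	// Response shape from the comment: {"data": {"valid": bool, "expiration": ms}}
	var r struct {
		Data struct {
			Valid      bool  `json:"valid"`
			Expiration int64 `json:"expiration"`
		} `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&r); err != nil {
		log.Fatalf("decoding license response: %v", err)
	}
	log.Printf("valid=%v, expires at %d ms", r.Data.Valid, r.Data.Expiration)
}
```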
@ -108,7 +108,7 @@
    - all
- name: Checking Enterprise Licence
  uri:
    url: https://parrot.asayer.io/os/license
    url: https://api.openreplay.com/os/license
  body:
    mid: "UUID of the machine"
    license: "{{ enterprise_edition_license }}"

@ -40,3 +40,7 @@ dependencies:
    repository: file://charts/redis
    version: 12.10.1
    condition: redis.enabled
  - name: minio
    repository: file://charts/minio
    version: 3.7.14
    condition: minio.enabled

@ -99,6 +99,9 @@ redis:
      cpu: 100m
      memory: 128Mi

minio:
  enabled: true

postgresql:
  # postgresqlPassword: asayerPostgres
  fullnameOverride: postgresql

@ -82,7 +82,7 @@ data:
          proxy_set_header Host $host;
          proxy_set_header X-Forwarded-For $origin_forwarded_ip;
          proxy_set_header X-Real-IP $origin_forwarded_ip;
          proxy_pass http://utilities-pool;
          proxy_pass http://utilities-openreplay.app.svc.cluster.local:9001;
        }
        location /assets/ {
          rewrite ^/assets/(.*) /sessions-assets/$1 break;

@ -83,6 +83,9 @@ autoscaling:

env:
  REDIS_URL: "redis://redis-master.db.svc.cluster.local:6379"
  debug: 0
  uws: false
  redis: false


nodeSelector: {}

@ -95,13 +95,6 @@ chalice:
  # idp_name: ''
  # idp_tenantKey: ''

utilities:
  replicaCount: 1
  env:
    debug: 0
    uws: false
    redis: false

# If you want to override something
# chartname:
#   fieldFrom chart/Values.yaml:

@ -1,19 +1,51 @@
Copyright (c) 2021 OpenReplay.com <support@openreplay.com>
Copyright (c) 2022 Asayer, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
Reach out (license@openreplay.com) if you have any questions regarding the license.

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
------------------------------------------------------------------------------------

Elastic License 2.0 (ELv2)

**Acceptance**
By using the software, you agree to all of the terms and conditions below.

**Copyright License**
The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below.

**Limitations**
You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software.

You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key.

You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law.

**Patents**
The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.

**Notices**
You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.

If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.

**No Other Rights**
These terms do not imply any licenses other than those expressly granted in these terms.

**Termination**
If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.

**No Liability**
As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.

**Definitions**
The *licensor* is the entity offering these terms, and the *software* is the software the licensor makes available under these terms, including any portion of it.

*you* refers to the individual or entity agreeing to these terms.

*your company* is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. *control* means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.

*your licenses* are all the licenses granted to you for the software under these terms.

*use* means anything you do with the software requiring one of your licenses.

*trademark* means trademarks, service marks, and similar rights.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@ -1,7 +1,6 @@
# OpenReplay Tracker Fetch plugin
# Fetch plugin for OpenReplay

Tracker plugin to support tracking of the `fetch` requests payload.
Additionally it populates the requests with `sessionToken` header for backend logging.
This plugin allows you to capture `fetch` payloads and inspect them later on while replaying session recordings. This is very useful for understanding and fixing issues.

## Installation

@ -11,36 +10,120 @@ npm i @openreplay/tracker-fetch

## Usage

Initialize the `@openreplay/tracker` package as usual and load the plugin into it.
Then you can use the provided `fetch` method from the plugin instead of the built-in one.
Use the provided `fetch` method from the plugin instead of the built-in one.

### If your website is a Single Page Application (SPA)

```js
import Tracker from '@openreplay/tracker';
import OpenReplay from '@openreplay/tracker';
import trackerFetch from '@openreplay/tracker-fetch';

const tracker = new Tracker({
  projectKey: YOUR_PROJECT_KEY,
const tracker = new OpenReplay({
  projectKey: PROJECT_KEY
});
const fetch = tracker.use(trackerFetch(options)); // check the list of available options below

tracker.start();

export const fetch = tracker.use(trackerFetch({ /* options here */ }));

fetch('https://my.api.io/resource').then(response => response.json()).then(body => console.log(body));
fetch('https://myapi.com/').then(response => response.json()).then(body => console.log(body));
```

Options:
```ts
{
  failuresOnly: boolean, // default false
  sessionTokenHeader: string | undefined, // default undefined
  ignoreHeaders: Array<string> | boolean, // default [ 'Cookie', 'Set-Cookie', 'Authorization' ]
}
```
### If your web app is Server-Side-Rendered (SSR)

Follow the example below if your app is SSR. Ensure `tracker.start()` is called once the app is started (in `useEffect` or `componentDidMount`).

```js
import OpenReplay from '@openreplay/tracker/cjs';
import trackerFetch from '@openreplay/tracker-fetch/cjs';

const tracker = new OpenReplay({
  projectKey: PROJECT_KEY
});
const fetch = tracker.use(trackerFetch(options)); // check the list of available options below

//...
function MyApp() {
  useEffect(() => { // use componentDidMount in case of a React Class Component
    tracker.start();

    fetch('https://myapi.com/').then(response => response.json()).then(body => console.log(body));
  }, [])
  //...
}

```

Set the `failuresOnly` option to `true` if you want to record only requests with a status code >= 400.
## Options

In case you use [OpenReplay integrations (sentry, bugsnag or others)](https://docs.openreplay.com/integrations), you can use the `sessionTokenHeader` option to specify the header name. This header will be appended automatically to each fetch request and will contain the OpenReplay session identifier value.
```js
trackerFetch({
  overrideGlobal: boolean;
  failuresOnly: boolean;
  sessionTokenHeader: string;
  ignoreHeaders: Array<string> | boolean;
  sanitiser: (RequestResponseData) => RequestResponseData | null;
})
```

You can define a list of headers that you don't want to capture with the `ignoreHeaders` option. Set its value to `false` if you want to catch them all (`true` if the opposite). By default the plugin ignores a list of headers that might be sensitive, such as `[ 'Cookie', 'Set-Cookie', 'Authorization' ]`.
- `overrideGlobal`: Overrides the default `window.fetch`. Default: `false`.
- `failuresOnly`: Captures requests having 4xx-5xx HTTP status code. Default: `false`.
- `sessionTokenHeader`: In case you have enabled some of our backend [integrations](/integrations) (i.e. Sentry), you can use this option to specify the header name (i.e. 'X-OpenReplay-SessionToken'). The latter gets appended automatically to each fetch request to contain the OpenReplay sessionToken's value. Default: `undefined`.
- `ignoreHeaders`: Helps define a list of headers you don't wish to capture. Set its value to `false` to capture all of them (`true` if none). Default: `['Cookie', 'Set-Cookie', 'Authorization']` so sensitive headers won't be captured.
- `sanitiser`: Sanitise sensitive data from the fetch request/response or ignore the request completely. You can redact fields on the request object by modifying and then returning it from the function:

```typescript
interface RequestData {
  body: BodyInit | null | undefined; // whatever you've put in init.body in fetch(url, init)
  headers: Record<string, string>;
}

interface ResponseData {
  body: string | Object | null; // Object if the response is of JSON type
  headers: Record<string, string>;
}

interface RequestResponseData {
  readonly status: number;
  readonly method: string;
  url: string;
  request: RequestData;
  response: ResponseData;
}

sanitiser: (data: RequestResponseData) => { // sanitise the body or headers
  if (data.url === "/auth") {
    data.request.body = null
  }

  if (data.request.headers['x-auth-token']) { // can also use the ignoreHeaders option instead
    data.request.headers['x-auth-token'] = 'SANITISED';
  }

  // Sanitise the response
  if (data.status < 400 && data.response.body.token) {
    data.response.body.token = "<TOKEN>"
  }

  return data
}

// OR

sanitiser: data => { // ignore requests that start with /secure
  if (data.url.startsWith("/secure")) {
    return null
  }
  return data
}

// OR

sanitiser: data => { // sanitise the request url: replace all numbers
  data.url = data.url.replace(/\d/g, "*")
  return data
}
```

## Troubleshooting

Having trouble setting up this plugin? Please connect to our [Discord](https://discord.openreplay.com) and get help from our community.

@ -1,7 +1,7 @@
{
  "name": "@openreplay/tracker-fetch",
  "description": "Tracker plugin for fetch requests recording",
  "version": "3.5.1",
  "version": "3.5.3",
  "keywords": [
    "fetch",
    "logging",

@ -1,41 +1,52 @@
import { App, Messages } from '@openreplay/tracker';

interface Request {
  url: string,
  body: string | Object,
  headers: Record<string, string>,
interface RequestData {
  body: BodyInit | null | undefined
  headers: Record<string, string>
}

interface Response {
  url: string,
  status: number,
  body: string,
  headers: Record<string, string>,
interface ResponseData {
  body: string | Object | null
  headers: Record<string, string>
}

interface RequestResponseData {
  readonly status: number
  readonly method: string
  url: string
  request: RequestData
  response: ResponseData
}


export interface Options {
  sessionTokenHeader?: string;
  replaceDefault: boolean; // overrideDefault ?
  failuresOnly: boolean;
  ignoreHeaders: Array<string> | boolean;
  requestSanitizer: ((Request) => Request | null) | null;
  responseSanitizer: ((Response) => Response | null) | null;
  sessionTokenHeader?: string
  failuresOnly: boolean
  overrideGlobal: boolean
  ignoreHeaders: Array<string> | boolean
  sanitiser?: (RequestResponseData) => RequestResponseData | null

  requestSanitizer?: any
  responseSanitizer?: any
}

export default function(opts: Partial<Options> = {}) {
  const options: Options = Object.assign(
    {
      replaceDefault: false,
      overrideGlobal: false,
      failuresOnly: false,
      ignoreHeaders: [ 'Cookie', 'Set-Cookie', 'Authorization' ],
      requestSanitizer: null,
      responseSanitizer: null,
    },
    opts,
  );
  if (options.requestSanitizer || options.responseSanitizer) {
    console.warn("OpenReplay fetch plugin: `requestSanitizer` and `responseSanitizer` options are deprecated. Please use `sanitiser` instead (check out the documentation at https://docs.openreplay.com/plugins/fetch).")
  }

  const origFetch = window.fetch
  return (app: App | null) => {
    if (app === null) {
      return window.fetch;
      return origFetch
    }

    const ihOpt = options.ignoreHeaders

@ -45,7 +56,7 @@ export default function(opts: Partial<Options> = {}) {

    const fetch = async (input: RequestInfo, init: RequestInit = {}) => {
      if (typeof input !== 'string') {
        return window.fetch(input, init);
        return origFetch(input, init);
      }
      if (options.sessionTokenHeader) {
        const sessionToken = app.getSessionToken();
@ -63,7 +74,7 @@ export default function(opts: Partial<Options> = {}) {
        }
      }
      const startTime = performance.now();
      const response = await window.fetch(input, init);
      const response = await origFetch(input, init);
      const duration = performance.now() - startTime;
      if (options.failuresOnly && response.status < 400) {
        return response
@ -89,56 +100,55 @@ export default function(opts: Partial<Options> = {}) {
        r.headers.forEach((v, n) => { if (!isHIgnoring(n)) resHs[n] = v })
      }

      // Request forming
      let reqBody = ''
      if (typeof init.body === 'string') {
        reqBody = init.body
      } else if (typeof init.body === 'object') {
        try {
          reqBody = JSON.stringify(init.body)
        } catch {}
      }
      let req: Request | null = {
        url: input,
      const req: RequestData = {
        headers: reqHs,
        body: reqBody,
      }
      if (options.requestSanitizer !== null) {
        req = options.requestSanitizer(req)
        if (!req) {
          return
        }
        body: init.body,
      }

      // Response forming
      let res: Response | null = {
        url: input,
        status: r.status,
      const res: ResponseData = {
        headers: resHs,
        body: text,
      }
      if (options.responseSanitizer !== null) {
        res = options.responseSanitizer(res)
        if (!res) {

      const method = typeof init.method === 'string'
        ? init.method.toUpperCase()
        : 'GET'
      let reqResInfo: RequestResponseData | null = {
        url: input,
        method,
        status: r.status,
        request: req,
        response: res,
      }
      if (options.sanitiser) {
        try {
          reqResInfo.response.body = JSON.parse(text) as Object // Why is the returned type "any"?
        } catch {}
        reqResInfo = options.sanitiser(reqResInfo)
        if (!reqResInfo) {
          return
        }
      }

      const reqStr = JSON.stringify({
        headers: req.headers,
        body: req.body,
      })
      const resStr = JSON.stringify({
        headers: res.headers,
        body: res.body,
      })
      const getStj = (r: RequestData | ResponseData): string => {
        if (r && typeof r.body !== 'string') {
          try {
            r.body = JSON.stringify(r.body)
          } catch {
            r.body = "<unable to stringify>"
            //app.log.warn("OpenReplay fetch") // TODO: version check
          }
        }
        return JSON.stringify(r)
      }

      app.send(
        Messages.Fetch(
          typeof init.method === 'string' ? init.method.toUpperCase() : 'GET',
          input,
          reqStr,
          resStr,
          method,
          String(reqResInfo.url),
          getStj(reqResInfo.request),
          getStj(reqResInfo.response),
          r.status,
          startTime + performance.timing.navigationStart,
          duration,
@ -146,8 +156,9 @@ export default function(opts: Partial<Options> = {}) {
        )
      });
      return response;
    };
    if (options.replaceDefault) {
    }

    if (options.overrideGlobal) {
      window.fetch = fetch
    }
    return fetch;