* ci(deployment): injecting secrets Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* fix: typo
* feat(installation): Enterprise license check
* fix(install): reset ee cli args Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* Fix typo
* Update README.md
* feat (tracker-axios): init plugin
* fix (tracker-axios): version patch
* Fixed alert's unknown metrics handler
* fix (tracker-mobx): dev-dependencies and updated package-lock
* feat: APIs for user session data deletion - wip
* fix: alert metric value of performance.speed_index
* Build and deploy scripts for enterprise edition (#13)
* feat(installation): enterprise installation
* chore(install): enabling ansible gather_facts Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(install): quotes for enterprise key Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(installation): enterprise install dbs Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(install): rename yaml
* chore(install): change image tag Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(install): License key variable added
* chore(deployment): Injecting enterprise license key in workers.
* chore(install): remove deprecated files
* chore(install): make domain_name mandatory in vars.yaml Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(actions): ee workers Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* feat(install): use local docker instead of crictl — you can use images built on the local machine during installation, without pushing them to any external registry. Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* feat: APIs for user session data deletion
* feat: prefix deleted mobs with DEL_
* feat: schedules to delete mobs
* chore(ci): fix ee build Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* feat(build): passing build args to internal scripts Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(install): moving kafka topic creation to the end — Kafka pods usually take time to become active. Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(install): removing auth service.
* chore(install): Adding rancher for cluster management
* chore(install): proper name for alerts template
* separate requirements and clean up
* feat (frontend): typescript support
* feat (tracker): 3.0.4: maintain baseURL & connAttempt options
* feat(api): changed license validation
* feat(api): ee-license fix for unprovided value
* feat(api): fixed ee-signup cursor
* feat(api): FOS fix replay-mob issue
* feat(api): ee log ch-resources query
* chore(ci): change openreplay-cli with kube-install.sh Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* ci(actions): change ee naming
* feat(api): removed ch-logs
* feat(install): injecting ee variables only on ee installation. Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* chore(install): remove license key from ee Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* fix(install): ch values for chalice
* feat(clickhouse): moved creation scripts to EE folder
* fix (backend-ee): disable ios tables so far
* chore(install): remove deprecated mandatory variables. Signed-off-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
* feat(api): remove duplicate files & changed signup
* fix(backend-ee): ch prepare after commit
* fix(backend-ee): syntax
* feat(api): added missing EE tenant column
* fix(scripts-ee): correct default clickhouse host
* feat(api): changed version_number location
* feat(api): ee log ch-errors query
* feat(api): ee fix ch-errors query
* feat: skip to issue button (#23)
* feat(api): 🐛 ee fix ambiguous ch-error query & accounts endpoint
* Feature: Autoplay Sessions (#22)
* feat: autoplay sessions
* change: removed unused import
* auto play filter by tab
* feat(api): changed JWT authorizer & API_KEY authorizer & fix undefined project_key
* feat (backend-devops): Dockerfile for all services in one image
* feat(sourcemap-uploader): use --verbose argument instead of --log
* feat(api): log middleware
* Feature - dom inspector (#28)
* feat (frontend): typescript support
* feat(frontend): DOM Inspector init
* fix(frontend): use tailwind bg
* feat(frontend dom-inspector): add element selection & deletion
* fix(frontend): todo comment
* di - styling wip
* feature(di) - editor theme
* feat(frontend): parse attributes with RE (+ability to add)
* feature(di) - input width
* fix(ui): di - review changes Co-authored-by: ShiKhu <alex.kaminsky.11@gmail.com>
* chore(install): remove deprecated init_dbs
* feat(api): ee override multi-tenant-core
* fix(frontend-build): gen css types before build
* fix(ui) - checking for the license (#30)

Co-authored-by: Rajesh Rajendran <rjshrjndrn@gmail.com>
Co-authored-by: Mehdi Osman <estradino@users.noreply.github.com>
Co-authored-by: ShiKhu <alex.kaminsky.11@gmail.com>
Co-authored-by: KRAIEM Taha Yassine <tahayk2@gmail.com>
Co-authored-by: Rajesh Rajendran <rjshrjndrn@users.noreply.github.com>
Co-authored-by: ourvakan <hi-psi@yandex.com>
Co-authored-by: tahayk2@gmail.com <enissay4ever4github>
package postgres

import (
    "database/sql"
    "errors"
    "fmt"
    "log"
    "strconv"
    "time"

    sq "github.com/Masterminds/squirrel"
)

type TimeString sql.NullString

type query struct {
    Left     string  `db:"query.left" json:"left"`
    Operator string  `db:"query.operator" json:"operator"`
    Right    float64 `db:"query.right" json:"right"`
}

type options struct {
    RenotifyInterval int64               `db:"options.renotifyInterval" json:"renotifyInterval"`
    LastNotification int64               `db:"options.lastNotification" json:"lastNotification,omitempty"`
    CurrentPeriod    int64               `db:"options.currentPeriod" json:"currentPeriod"`
    PreviousPeriod   int64               `db:"options.previousPeriod" json:"previousPeriod,omitempty"`
    Message          []map[string]string `db:"options.message" json:"message,omitempty"`
    Change           string              `db:"options.change" json:"change,omitempty"`
}

// Alert mirrors a row of public.alerts joined with its project's tenant_id.
type Alert struct {
    AlertID         uint32         `db:"alert_id" json:"alert_id"`
    ProjectID       uint32         `db:"project_id" json:"project_id"`
    Name            string         `db:"name" json:"name"`
    Description     sql.NullString `db:"description" json:"description"`
    Active          bool           `db:"active" json:"active"`
    DetectionMethod string         `db:"detection_method" json:"detection_method"`
    Query           query          `db:"query" json:"query"`
    DeletedAt       *int64         `db:"deleted_at" json:"deleted_at"`
    CreatedAt       *int64         `db:"created_at" json:"created_at"`
    Options         options        `db:"options" json:"options"`
    TenantId        uint32         `db:"tenant_id" json:"tenant_id"`
}

// IterateAlerts selects every active, non-deleted alert together with its
// project's tenant and passes each one to iter. Row-level scan errors are
// forwarded to iter and iteration continues with the next row.
func (pg *Conn) IterateAlerts(iter func(alert *Alert, err error)) error {
    rows, err := pg.query(`
        SELECT
            alerts.alert_id,
            alerts.project_id,
            alerts.name,
            alerts.description,
            alerts.active,
            alerts.detection_method,
            alerts.query,
            CAST(EXTRACT(epoch FROM alerts.deleted_at) * 1000 AS BIGINT) AS deleted_at,
            CAST(EXTRACT(epoch FROM alerts.created_at) * 1000 AS BIGINT) AS created_at,
            alerts.options,
            projects.tenant_id
        FROM public.alerts INNER JOIN public.projects USING(project_id)
        WHERE alerts.active AND alerts.deleted_at ISNULL;
    `)
    if err != nil {
        return err
    }
    defer rows.Close()
    for rows.Next() {
        a := new(Alert)
        if err = rows.Scan(
            &a.AlertID,
            &a.ProjectID,
            &a.Name,
            &a.Description,
            &a.Active,
            &a.DetectionMethod,
            &a.Query,
            &a.DeletedAt,
            &a.CreatedAt,
            &a.Options,
            &a.TenantId,
        ); err != nil {
            iter(nil, err)
            continue
        }
        iter(a, nil)
    }

    if err = rows.Err(); err != nil {
        return err
    }
    return nil
}

// SaveLastNotification stamps the current time (in milliseconds) as
// options.lastNotification for every alert in allIds.
func (pg *Conn) SaveLastNotification(allIds []uint32) error {
    if len(allIds) == 0 {
        return nil // nothing to update; an empty IN () would also be invalid SQL
    }
    var paramrefs string
    for _, v := range allIds {
        paramrefs += strconv.Itoa(int(v)) + `,`
    }
    paramrefs = paramrefs[:len(paramrefs)-1] // remove trailing ","
    q := "UPDATE public.alerts SET options = options||'{\"lastNotification\":" + strconv.Itoa(int(time.Now().Unix()*1000)) + "}'::jsonb WHERE alert_id IN (" + paramrefs + ");"
    //log.Println(q)
    log.Println("Updating PG")
    return pg.exec(q)
}

type columnDefinition struct {
    table     string
    formula   string
    condition string
    group     string
}

var LeftToDb = map[string]columnDefinition{
    "performance.dom_content_loaded.average":      {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
    "performance.first_meaningful_paint.average":  {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
    "performance.page_load_time.average":          {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(load_time ,0))"},
    "performance.dom_build_time.average":          {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(dom_building_time,0))"},
    "performance.speed_index.average":             {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(speed_index,0))"},
    "performance.page_response_time.average":      {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(response_time,0))"},
    "performance.ttfb.average":                    {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(first_paint_time,0))"},
    "performance.time_to_render.average":          {table: "events.pages INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(visually_complete,0))"},
    "performance.image_load_time.average":         {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='img'"},
    "performance.request_load_time.average":       {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))", condition: "type='fetch'"},
    "resources.load_time.average":                 {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "AVG(NULLIF(resources.duration,0))"},
    "resources.missing.count":                     {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT url_hostpath)", condition: "success= FALSE"},
    "errors.4xx_5xx.count":                        {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100!=2"},
    "errors.4xx.count":                            {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=4"},
    "errors.5xx.count":                            {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(session_id)", condition: "status/100=5"},
    "errors.javascript.impacted_sessions.count":   {table: "events.resources INNER JOIN public.sessions USING(session_id)", formula: "COUNT(DISTINCT session_id)", condition: "success= FALSE AND type='script'"},
    "performance.crashes.count":                   {table: "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions", formula: "COUNT(DISTINCT session_id)", condition: "errors_count > 0"},
    "errors.javascript.count":                     {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source='js_exception'"},
    "errors.backend.count":                        {table: "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", formula: "COUNT(DISTINCT session_id)", condition: "source!='js_exception'"},
}

// This is the frequency of execution for each threshold (period and check
// interval, both in minutes).
var TimeInterval = map[int64]int64{
    15:   3,
    30:   5,
    60:   10,
    120:  20,
    240:  30,
    1440: 60,
}

// CanCheck reports whether the alert is due for evaluation right now: it must
// be active and not deleted, outside its renotify window, and aligned with the
// check interval that TimeInterval maps to its period.
func (a *Alert) CanCheck() bool {
    now := time.Now().Unix() * 1000
    var repetitionBase int64

    if repetitionBase = a.Options.CurrentPeriod; a.DetectionMethod == "change" && a.Options.CurrentPeriod > a.Options.PreviousPeriod {
        repetitionBase = a.Options.PreviousPeriod
    }

    if _, ok := TimeInterval[repetitionBase]; !ok {
        log.Printf("repetitionBase: %d NOT FOUND", repetitionBase)
        return false
    }
    return a.DeletedAt == nil && a.Active &&
        (a.Options.RenotifyInterval <= 0 ||
            a.Options.LastNotification <= 0 ||
            ((now - a.Options.LastNotification) > a.Options.RenotifyInterval*60*1000)) &&
        ((now-*a.CreatedAt)%(TimeInterval[repetitionBase]*60*1000)) < 60*1000
}

// Build translates the alert definition into a squirrel SELECT that yields the
// metric value together with a boolean "valid" column telling whether the
// alert condition holds.
func (a *Alert) Build() (sq.SelectBuilder, error) {
    colDef, ok := LeftToDb[a.Query.Left]
    if !ok {
        return sq.Select(), fmt.Errorf("!! unsupported metric '%s' from alert: %d:%s", a.Query.Left, a.AlertID, a.Name)
    }

    subQ := sq.
        Select(colDef.formula + " AS value").
        From(colDef.table).
        Where(sq.And{sq.Expr("project_id = $1 ", a.ProjectID),
            sq.Expr(colDef.condition)})
    q := sq.Select(fmt.Sprint("value, coalesce(value,0)", a.Query.Operator, a.Query.Right, " AS valid"))
    if len(colDef.group) > 0 {
        subQ = subQ.Column(colDef.group + " AS group_value")
        subQ = subQ.GroupBy(colDef.group)
        q = q.Column("group_value")
    }

    if a.DetectionMethod == "threshold" {
        q = q.FromSelect(subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)), "stat")
    } else if a.DetectionMethod == "change" {
        if a.Options.Change == "change" {
            if len(colDef.group) == 0 {
                sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")-(" + sub2 + ")) AS value").ToSql()
                q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
            } else {
                subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60))
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-2*a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
                q = q.FromSelect(sub1, "stat")
            }
        } else if a.Options.Change == "percent" {
            if len(colDef.group) == 0 {
                sub1, args1, _ := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60)).ToSql()
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1, _, _ = sq.Expr("SELECT ((" + sub1 + ")/(" + sub2 + ")-1)*100 AS value").ToSql()
                q = q.JoinClause("FROM ("+sub1+") AS stat", append(args1, args2...)...)
            } else {
                subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod*60))
                sub2, args2, _ := subQ.Where(
                    sq.And{
                        sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod*60),
                        sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod*60-a.Options.CurrentPeriod*60),
                    }).ToSql()
                sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
                q = q.FromSelect(sub1, "stat")
            }
        } else {
            return q, errors.New("unsupported change method")
        }
    } else {
        return q, errors.New("unsupported detection method")
    }
    return q, nil
}
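
// The sketch below is illustrative and not part of the original file: it shows
// one way a caller could wire IterateAlerts, CanCheck and Build together. The
// method name checkDueAlerts and the handle callback are assumptions made for
// the example; only identifiers defined in this file (plus the standard log
// package) are used.
func (pg *Conn) checkDueAlerts(handle func(alert *Alert, q sq.SelectBuilder)) error {
    return pg.IterateAlerts(func(alert *Alert, err error) {
        if err != nil {
            // Row-level scan error: log it and move on to the next alert.
            log.Println("iterate alerts:", err)
            return
        }
        if !alert.CanCheck() {
            // Inactive, deleted, recently notified, or not aligned with its check interval.
            return
        }
        q, err := alert.Build()
        if err != nil {
            // Unsupported metric, change method or detection method.
            log.Println(err)
            return
        }
        // Hand the ready-to-run query builder to the caller for execution.
        handle(alert, q)
    })
}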