openreplay/ee/backend/pkg/kafka/consumer.go

package kafka

import (
    "log"
    "os"
    // "os/signal"
    // "syscall"
    "time"

    "github.com/pkg/errors"
    "gopkg.in/confluentinc/confluent-kafka-go.v1/kafka"

    "openreplay/backend/pkg/env"
    "openreplay/backend/pkg/queue/types"
)

type Message = kafka.Message

// Consumer wraps a confluent-kafka-go consumer, dispatching each message to a
// handler and committing offsets on a fixed ticker unless auto-commit is disabled.
type Consumer struct {
    c                *kafka.Consumer
    messageHandler   types.MessageHandler
    commitTicker     *time.Ticker
    pollTimeout      uint
    lastKafkaEventTs int64 // timestamp (ms) of the last handled Kafka message
    partitions       []kafka.TopicPartition
}

// NewConsumer creates a consumer for the given group, subscribed to all the
// given topics, with offsets committed every two minutes unless
// DisableAutoCommit is called.
func NewConsumer(group string, topics []string, messageHandler types.MessageHandler) *Consumer {
    protocol := "plaintext"
    if env.Bool("KAFKA_USE_SSL") {
        protocol = "ssl"
    }
    c, err := kafka.NewConsumer(&kafka.ConfigMap{
        "bootstrap.servers":               env.String("KAFKA_SERVERS"),
        "group.id":                        group,
        "auto.offset.reset":               "earliest",
        "enable.auto.commit":              "false",
        "security.protocol":               protocol,
        "go.application.rebalance.enable": true,
    })
    if err != nil {
        log.Fatalln(err)
    }

    // librdkafka treats topic names starting with "^" as regular expressions,
    // so one subscription string of the form "^(topic1|topic2|...)$" covers
    // every requested topic.
    subREx := "^("
    for i, t := range topics {
        if i != 0 {
            subREx += "|"
        }
        subREx += t
    }
    subREx += ")$"
    if err := c.Subscribe(subREx, nil); err != nil {
        log.Fatalln(err)
    }
    return &Consumer{
        c:              c,
        messageHandler: messageHandler,
        commitTicker:   time.NewTicker(2 * time.Minute),
        pollTimeout:    200,
    }
}
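
// For example (hypothetical topic names): NewConsumer("db", []string{"messages", "events"}, h)
// subscribes with the pattern "^(messages|events)$".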

// DisableAutoCommit stops the ticker-driven periodic commit; the caller then
// owns offset commits via Commit or CommitBack.
func (consumer *Consumer) DisableAutoCommit() {
    consumer.commitTicker.Stop()
}

func (consumer *Consumer) Commit() error {
    consumer.c.Commit() // TODO: return error if it is not "No offset stored"
    return nil
}

// CommitBack commits the offsets corresponding to `gap` milliseconds before
// the last handled message timestamp, keeping a replayable window behind the
// live position.
func (consumer *Consumer) CommitBack(gap int64) error {
    if consumer.lastKafkaEventTs == 0 || consumer.partitions == nil {
        return nil
    }
    commitTs := consumer.lastKafkaEventTs - gap
    var timestamps []kafka.TopicPartition
    for _, p := range consumer.partitions { // p is a copy here since partition is not a pointer
        p.Offset = kafka.Offset(commitTs)
        timestamps = append(timestamps, p)
    }
    offsets, err := consumer.c.OffsetsForTimes(timestamps, 2000)
    if err != nil {
        return errors.Wrap(err, "Kafka Consumer back commit error")
    }
    // TODO: check per-partition errors: offsets[i].Error
    // As an option: can store offsets and enable autocommit instead
    _, err = consumer.c.CommitOffsets(offsets)
    return errors.Wrap(err, "Kafka Consumer back commit error")
}
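
// Illustration (hypothetical numbers, not from the source): with
// lastKafkaEventTs = 1_620_000_100_000 ms and gap = 100_000 ms, CommitBack
// resolves, via OffsetsForTimes, the earliest offset at or after
// 1_620_000_000_000 ms on each assigned partition and commits those offsets,
// so a restart replays roughly the last `gap` milliseconds of messages.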

// ConsumeNext polls for a single event, dispatching messages to the handler
// and reacting to rebalance and error events. A nil return does not imply a
// message was handled: poll timeouts also return nil.
func (consumer *Consumer) ConsumeNext() error {
    ev := consumer.c.Poll(int(consumer.pollTimeout))
    if ev == nil {
        return nil
    }
    select {
    case <-consumer.commitTicker.C:
        consumer.Commit()
    default:
    }
    switch e := ev.(type) {
    case *kafka.Message:
        if e.TopicPartition.Error != nil {
            return errors.Wrap(e.TopicPartition.Error, "Consumer Partition Error")
        }
        ts := e.Timestamp.UnixNano() / 1e6 // milliseconds
        consumer.messageHandler(decodeKey(e.Key), e.Value, &types.Meta{
            Topic:     *(e.TopicPartition.Topic),
            ID:        uint64(e.TopicPartition.Offset),
            Timestamp: ts,
        })
        consumer.lastKafkaEventTs = ts
    case kafka.AssignedPartitions:
        logPartitions("Kafka Consumer: Partitions Assigned", e.Partitions)
        consumer.partitions = e.Partitions
        consumer.c.Assign(e.Partitions)
    case kafka.RevokedPartitions:
        log.Println("Kafka Consumer: Partitions Revoked")
        consumer.partitions = nil
        consumer.c.Unassign()
    case kafka.Error:
        if e.Code() == kafka.ErrAllBrokersDown {
            os.Exit(1)
        }
        log.Printf("Consumer error: %v\n", e)
    }
    return nil
}

// Close commits pending offsets (when the commit ticker exists) and closes the
// underlying consumer.
func (consumer *Consumer) Close() {
    if consumer.commitTicker != nil {
        consumer.Commit()
    }
    if err := consumer.c.Close(); err != nil {
        log.Printf("Kafka consumer close error: %v", err)
    }
}
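
// A minimal usage sketch (hypothetical wiring, not taken from any service in
// the repo); it assumes types.MessageHandler matches the call in ConsumeNext,
// i.e. func(key uint64, value []byte, meta *types.Meta):
//
//	consumer := NewConsumer("db-group", []string{"messages"}, func(key uint64, value []byte, meta *types.Meta) {
//		log.Printf("topic=%s offset=%d ts=%d size=%d", meta.Topic, meta.ID, meta.Timestamp, len(value))
//	})
//	defer consumer.Close()
//	for {
//		if err := consumer.ConsumeNext(); err != nil {
//			log.Fatalf("consumer error: %v", err)
//		}
//	}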

// func (consumer *Consumer) consume(
// 	message func(m *kafka.Message) error,
// 	commit func(c *kafka.Consumer) error,
// ) error {
// 	if err := consumer.c.Subscribe(consumer.topic, nil); err != nil {
// 		return err
// 	}
// 	defer consumer.close()
// 	sigchan := make(chan os.Signal, 1)
// 	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
// 	ticker := time.NewTicker(consumer.commitInterval)
// 	defer ticker.Stop()
// 	for {
// 		select {
// 		case <-sigchan:
// 			return commit(consumer.c)
// 		case <-ticker.C:
// 			if err := commit(consumer.c); err != nil {
// 				return err
// 			}
// 		default:
// 			ev := consumer.c.Poll(consumer.pollTimeout)
// 			if ev == nil {
// 				continue
// 			}
// 			switch e := ev.(type) {
// 			case *kafka.Message:
// 				if e.TopicPartition.Error != nil {
// 					log.Println(e.TopicPartition.Error)
// 					continue
// 				}
// 				if err := message(e); err != nil {
// 					return err
// 				}
// 			case kafka.AssignedPartitions:
// 				if err := consumer.c.Assign(e.Partitions); err != nil {
// 					return err
// 				}
// 			case kafka.RevokedPartitions:
// 				if err := commit(consumer.c); err != nil {
// 					return err
// 				}
// 				if err := consumer.c.Unassign(); err != nil {
// 					return err
// 				}
// 			case kafka.Error:
// 				log.Println(e)
// 				if e.Code() == kafka.ErrAllBrokersDown {
// 					return e
// 				}
// 			}
// 		}
// 	}
// }

// func (consumer *Consumer) Consume(
// 	message func(key uint64, value []byte) error,
// ) error {
// 	return consumer.consume(
// 		func(m *kafka.Message) error {
// 			return message(decodeKey(m.Key), m.Value)
// 		},
// 		func(c *kafka.Consumer) error {
// 			if _, err := c.Commit(); err != nil {
// 				log.Println(err)
// 			}
// 			return nil
// 		},
// 	)
// }

// func (consumer *Consumer) ConsumeWithCommitHook(
// 	message func(key uint64, value []byte) error,
// 	commit func() error,
// ) error {
// 	return consumer.consume(
// 		func(m *kafka.Message) error {
// 			return message(decodeKey(m.Key), m.Value)
// 		},
// 		func(c *kafka.Consumer) error {
// 			if err := commit(); err != nil {
// 				return err
// 			}
// 			if _, err := c.Commit(); err != nil {
// 				log.Println(err)
// 			}
// 			return nil
// 		},
// 	)
// }