Merge remote-tracking branch 'origin/dev' into dev

commit b009eed102

10 changed files with 110 additions and 42 deletions
.github/workflows/api.yaml (2 changes)

@@ -45,7 +45,7 @@ jobs:
           }
       - name: Deploy to kubernetes
         run: |
-          cd ../scripts/helm/
+          cd scripts/helm/
           sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
           sed -i "s/tag:.*/tag: \"$IMAGE_TAG\"/g" app/chalice.yaml
           bash kube-install.sh --app chalice
.github/workflows/frontend.yaml (30 changes)

@@ -22,26 +22,24 @@ jobs:
             ${{ runner.OS }}-build-
             ${{ runner.OS }}-

+      - uses: azure/k8s-set-context@v1
+        with:
+          method: kubeconfig
+          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
+        id: setcontext
-      - name: Install
-        run: npm install
-
-      - name: Build
-        run: npm run build:staging
-        env:
-          ENVIRONMENT: staging
-
-      - name: Deploy
-        env:
-          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
-          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
-          AWS_REGION: eu-central-1
-          AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}
+      - name: Build and deploy
         run: |
-          aws configure set default.s3.signature_version s3v4
-          aws --endpoint-url https://${{secrets.DOMAIN_NAME}}/frontend/ s3 cp \
-            --recursive \
-            --region "$AWS_REGION" \
-            public s3://$AWS_S3_BUCKET_NAME
+          cd frontend
+          bash build.sh
+          cp -arl public frontend
+          minio_pod=$(kubectl get po -n db -l app.kubernetes.io/name=minio --output custom-columns=name:.metadata.name | tail -n+2)
+          echo $minio_pod
+          echo copying frontend to container.
+          kubectl -n db cp frontend $minio_pod:/data/
+          rm -rf frontend

       # - name: Debug Job
       #   if: ${{ failure() }}
.github/workflows/workers.yaml (19 changes)

@@ -55,7 +55,8 @@ jobs:
           cd backend
           for image in $(cat images_to_build.txt);
           do
-            bash ./build.sh skip $image
+            echo "Building $image"
+            bash -x ./build.sh skip $image
             docker push $DOCKER_REPO/$image:$IMAGE_TAG
             echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
          done

@@ -72,11 +73,11 @@ jobs:
            bash kube-install.sh --app $image
          done

-      - name: Debug Job
-        if: ${{ failure() }}
-        uses: mxschmitt/action-tmate@v3
-        env:
-          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-          IMAGE_TAG: ${{ github.sha }}
-          ENVIRONMENT: staging
-
+      # - name: Debug Job
+      #   if: ${{ failure() }}
+      #   uses: mxschmitt/action-tmate@v3
+      #   env:
+      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+      #     IMAGE_TAG: ${{ github.sha }}
+      #     ENVIRONMENT: staging
+      #
@@ -21,7 +21,7 @@ check_prereq() {
 function build_api(){
     # Copy enterprise code
     [[ $1 == "ee" ]] && {
-        cp ../ee/backend/* ./
+        cp -r ../ee/backend/* ./
         ee="true"
     }
     [[ $2 != "" ]] && {
@@ -15,11 +15,11 @@ import (
 	"openreplay/backend/services/assets/cacher"
 )

+// empty commit to trigger build.
+
 func main() {
 	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

-	GROUP_CACHE := env.String("GROUP_CACHE")
+	GROUP_CACHE := env.String("GROUP_CACHE")
 	TOPIC_TRIGGER := env.String("TOPIC_TRIGGER")

 	cacher := cacher.NewCacher(

@@ -30,10 +30,10 @@ func main() {
 	)

 	consumer := queue.NewMessageConsumer(
-		GROUP_CACHE,
-		[]string{TOPIC_TRIGGER},
+		GROUP_CACHE,
+		[]string{ TOPIC_TRIGGER },
 		func(sessionID uint64, message messages.Message, e *types.Meta) {
-			switch msg := message.(type) {
+			switch msg := message.(type) {
 			case *messages.AssetCache:
 				cacher.CacheURL(sessionID, msg.URL)
 			case *messages.ErrorEvent:

@@ -48,14 +48,15 @@ func main() {
 			for _, source := range sourceList {
 				cacher.CacheJSFile(source)
 			}
-		}
+		}
 		},
 	)

+
 	tick := time.Tick(20 * time.Minute)

 	sigchan := make(chan os.Signal, 1)
-	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
+	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

 	for {
 		select {

@@ -71,4 +72,4 @@ func main() {
 		}
 	}
-	}
+	}
 }
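The hunks above cut off inside the consumer's event loop. For orientation, here is a minimal sketch of the loop shape these lines imply, a 20-minute tick channel plus SIGINT/SIGTERM handling, with the queue-polling step reduced to a placeholder since the real consumer API is not shown in this diff:

package main

import (
	"log"
	"os"
	"os/signal"
	"syscall"
	"time"
)

func main() {
	// Same pattern as the diff: a bare tick channel, no Ticker to stop.
	tick := time.Tick(20 * time.Minute)

	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	for {
		select {
		case sig := <-sigchan:
			// Graceful shutdown on SIGINT/SIGTERM.
			log.Printf("caught signal %v: terminating", sig)
			return
		case <-tick:
			log.Println("periodic cache maintenance") // placeholder
		default:
			time.Sleep(100 * time.Millisecond) // the real service polls the queue here
		}
	}
}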
@@ -4,6 +4,8 @@ import (
 	"log"
 	"database/sql"
 	_ "github.com/ClickHouse/clickhouse-go"
+
+	"openreplay/backend/pkg/license"
 )

 type Connector struct {

@@ -27,6 +29,8 @@ type Connector struct {
 }

 func NewConnector(url string) *Connector {
+	license.CheckLicense()
+
 	db, err := sql.Open("clickhouse", url)
 	if err != nil {
 		log.Fatalln(err)
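A side note on the blank import above: `_ "github.com/ClickHouse/clickhouse-go"` is imported only for its init side effect, which registers the driver under the name "clickhouse" so the sql.Open("clickhouse", url) call can resolve it. A minimal self-contained sketch of the pattern (the DSN here is a made-up example; elsewhere in this diff the real URL comes from CLICKHOUSE_STRING):

package main

import (
	"database/sql"
	"log"

	// Imported for side effect only: registers the "clickhouse" driver.
	_ "github.com/ClickHouse/clickhouse-go"
)

func main() {
	// sql.Open validates its arguments but does not dial the server.
	db, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000") // made-up DSN
	if err != nil {
		log.Fatalln(err)
	}
	defer db.Close()

	// Ping forces a real connection, surfacing bad DSNs or a down server.
	if err := db.Ping(); err != nil {
		log.Fatalln(err)
	}
}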
ee/backend/pkg/license/check.go (new file, 61 lines)

@@ -0,0 +1,61 @@
+package license
+
+import (
+	"log"
+	"net/http"
+	"encoding/json"
+	"io/ioutil"
+	"bytes"
+
+	"openreplay/backend/pkg/env"
+)
+
+type request struct {
+	MID     string `json:"mid"`
+	License string `json:"license"`
+}
+
+type response struct {
+	Data struct {
+		IsValid             bool  `json:"valid"`
+		ExpirationTimestamp int64 `json:"expiration"`
+	} `json:"data"`
+}
+
+func CheckLicense() {
+	license := env.String("LICENSE_KEY")
+
+	requestBody, err := json.Marshal(request{License: license})
+	if err != nil {
+		log.Fatal("Can not form a license check request.")
+	}
+
+	resp, err := http.Post("https://parrot.asayer.io/os/license", "application/json", bytes.NewReader(requestBody))
+	if err != nil {
+		log.Fatalf("Error while checking license. %v", err)
+	}
+
+	if resp.StatusCode != 200 {
+		log.Fatal("Error on license check request.")
+	}
+
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Fatalf("Error while reading license check response. %v", err)
+	}
+
+	respJson := new(response)
+	if err = json.Unmarshal(body, respJson); err != nil {
+		log.Fatalf("Error while parsing license check response. %v", err)
+	}
+
+	if !respJson.Data.IsValid {
+		log.Fatal("License is not valid.")
+	}
+}
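The request and response structs in the new file pin down the license server's JSON wire format. A small standalone sketch of that round-trip encoding (all values made up):

package main

import (
	"encoding/json"
	"fmt"
)

type request struct {
	MID     string `json:"mid"`
	License string `json:"license"`
}

type response struct {
	Data struct {
		IsValid             bool  `json:"valid"`
		ExpirationTimestamp int64 `json:"expiration"`
	} `json:"data"`
}

func main() {
	// The body CheckLicense posts; MID has no omitempty, so it is
	// serialized even when empty.
	body, _ := json.Marshal(request{License: "made-up-key"})
	fmt.Println(string(body)) // {"mid":"","license":"made-up-key"}

	// A sample server reply and how it decodes into the response struct.
	reply := []byte(`{"data":{"valid":true,"expiration":1700000000}}`)
	var r response
	if err := json.Unmarshal(reply, &r); err != nil {
		panic(err)
	}
	fmt.Println(r.Data.IsValid, r.Data.ExpirationTimestamp) // true 1700000000
}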
@@ -3,13 +3,16 @@ package queue
 import (
 	"openreplay/backend/pkg/kafka"
 	"openreplay/backend/pkg/queue/types"
+	"openreplay/backend/pkg/license"
 )

 func NewConsumer(group string, topics []string, handler types.MessageHandler) types.Consumer {
+	license.CheckLicense()
 	return kafka.NewConsumer(group, topics, handler)
 }

 func NewProducer() types.Producer {
+	license.CheckLicense()
 	return kafka.NewProducer()
 }
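Since CheckLicense terminates the process with log.Fatal on any failure, constructing a queue client is now a hard license gate. A hedged usage sketch (it compiles only inside this repo's module; the group and topic names are made up):

package main

import (
	"openreplay/backend/pkg/queue"
	"openreplay/backend/pkg/queue/types"
)

func main() {
	// license.CheckLicense() runs inside NewConsumer/NewProducer and calls
	// log.Fatal on a missing or invalid LICENSE_KEY, so the process exits
	// before any Kafka connection is attempted.
	var handler types.MessageHandler // concrete definition not shown in this diff
	consumer := queue.NewConsumer("example-group", []string{"example-topic"}, handler)
	_ = consumer
}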
@@ -12,7 +12,7 @@ import (
 )

 var ch *clickhouse.Connector
-var finalizeTicker *time.Ticker
+var finalizeTicker <-chan time.Time

 func initStats() {
 	ch = clickhouse.NewConnector(env.String("CLICKHOUSE_STRING"))

@@ -20,7 +20,7 @@ func initStats() {
 		log.Fatalf("Clickhouse prepare error: %v\n", err)
 	}

-	finalizeTicker = time.NewTicker(20 * time.Minute)
+	finalizeTicker = time.Tick(20 * time.Minute)

 }
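The two hunks above are one change: time.NewTicker returns a *time.Ticker (you receive on its .C field and release it with Stop), while time.Tick returns the receive-only channel directly, which is why the variable's declared type changed as well. A runnable illustration of the two shapes:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Old shape: *time.Ticker. Receive on .C, call Stop() when done.
	t := time.NewTicker(50 * time.Millisecond)
	fmt.Println(<-t.C)
	t.Stop()

	// New shape: time.Tick hands back the channel itself. Convenient for
	// tickers that live as long as the process, but it cannot be stopped,
	// so the underlying ticker is never released.
	var tick <-chan time.Time = time.Tick(50 * time.Millisecond)
	fmt.Println(<-tick)
}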
@@ -7,7 +7,7 @@
 # Give absolute file path.
 # Use following command to get the full file path
 # `readlink -f <file>`
-kubeconfig_path: /home/rajeshr/.cred/asayer/kube-oss.yaml
+kubeconfig_path: ""

 ###################
 ## Optional Fields.

@@ -36,8 +36,8 @@ domain_name: ""
 # By default, we'll create a self signed certificate for nginx, and populate the values here.
 # Once you have a proper domain name and ssl certificate,
 # change the following variables accordingly.
-nginx_ssl_cert_file_path: "/home/rajesh/Documents/projects/asayer/asayer-os/scripts/helm/nginx-ingress/site.crt"
-nginx_ssl_key_file_path: "/home/rajesh/Documents/projects/asayer/asayer-os/scripts/helm/nginx-ingress/site.key"
+nginx_ssl_cert_file_path: ""
+nginx_ssl_key_file_path: ""

 # Enable monitoring
 # If set, monitoring stack will be installed

@@ -50,5 +50,5 @@ enable_monitoring: "false"
 # If not defined, will generate at runtime.
 # Use following command to generate password
 # `openssl rand -base64 30`
-minio_access_key: "YkkPAPYjogRlicqvCuNSHkfsdGtCCq"
-minio_secret_key: "MSVmVGXfTpVNKUfVYdrKQemekoFeUg"
+minio_access_key: ""
+minio_secret_key: ""