fix(backend): remove redundant files
This commit is contained in:
parent
da2262395d
commit
27806054e0
4 changed files with 0 additions and 213 deletions
|
|
@ -1,55 +0,0 @@
|
|||
package player
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// request is a single recorded HTTP request as parsed from a session
// record file. Fields mirror the on-disk record tags written by the
// recorder package (<ts>, <method>, <url>, <headers>, <body>).
type request struct {
	ts      int64               // capture timestamp — presumably Unix milliseconds, matching the recorder's time.Now().UnixMilli(); TODO confirm
	method  string              // HTTP method, e.g. "GET"
	url     string              // request URL path
	headers map[string][]string // header name -> values; same shape as http.Header
	body    []byte              // raw request body
}
|
||||
|
||||
// playerImpl is the default Player implementation. Construct it via New.
type playerImpl struct {
	//
}

// Player replays previously recorded HTTP sessions.
type Player interface {
	// LoadRecord reads the session record stored at filePath.
	LoadRecord(filePath string) error
	// PlayRecord replays the loaded session against host.
	PlayRecord(host string) error
}

// New returns a ready-to-use Player.
func New() Player {
	return &playerImpl{}
}

// LoadRecord opens the session record at filePath and scans it line by
// line. For now each line is only logged; parsing lines into request
// values is not implemented yet.
//
// It returns an error when filePath is empty, when the file cannot be
// opened, or when scanning fails. Errors are wrapped with %w so callers
// can inspect the cause with errors.Is/As.
func (p *playerImpl) LoadRecord(filePath string) error {
	if filePath == "" {
		return fmt.Errorf("file name is empty")
	}
	// os.Open is the idiomatic read-only open; the 0644 mode previously
	// passed to os.OpenFile was meaningless without O_CREATE.
	file, err := os.Open(filePath)
	if err != nil {
		return fmt.Errorf("open file: %w", err)
	}
	defer file.Close()

	sc := bufio.NewScanner(file)
	for sc.Scan() {
		log.Println(sc.Text())
	}
	// Scan does not surface read errors itself; they must be checked
	// after the loop.
	if err := sc.Err(); err != nil {
		return fmt.Errorf("scan file: %w", err)
	}
	return nil
}

// PlayRecord replays the loaded record against host.
func (p *playerImpl) PlayRecord(host string) error {
	//TODO implement me
	panic("implement me")
}
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
package player
|
||||
|
||||
import (
	"os"
	"path/filepath"
	"testing"
)
|
||||
|
||||
func TestPlayer(t *testing.T) {
|
||||
player := New()
|
||||
if err := player.LoadRecord("/Users/alexander/7048055123532800"); err != nil {
|
||||
t.Logf("can't load session record: %s", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -1,94 +0,0 @@
|
|||
package recorder
|
||||
|
||||
import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"
)
|
||||
|
||||
// recorderImpl is the default Recorder implementation. It keeps one
// record file per session under sessionsDir and serializes writes to
// each file with a per-session mutex.
type recorderImpl struct {
	mu          sync.Mutex             // guards sessions; the per-session locks live in the map values
	sessions    map[uint64]*sync.Mutex // one mutex per session file
	sessionsDir string                 // directory holding one record file per session ID
}

// sessionLock returns the mutex for sessionID, creating it on first
// use. Access to the map itself is guarded by r.mu: the previous code
// mutated the map with no lock at all, which is a data race when
// SaveRequest is called concurrently.
func (r *recorderImpl) sessionLock(sessionID uint64) *sync.Mutex {
	r.mu.Lock()
	defer r.mu.Unlock()
	m, ok := r.sessions[sessionID]
	if !ok {
		m = &sync.Mutex{}
		r.sessions[sessionID] = m
	}
	return m
}

// SaveRequest appends one request record to the session's file.
//
// Record format (one tagged line per field):
//
//	<request>
//	<ts>...</ts> <method>...</method> <url>...</url>
//	<headers>...</headers> <body>...</body>
//	</request>
//
// Individual field-write failures are logged best-effort rather than
// returned; only failure to open the session file is returned as an
// error.
func (r *recorderImpl) SaveRequest(sessionID uint64, req *http.Request, body []byte) error {
	pwd, _ := os.Getwd() // best-effort, for the log line only
	log.Printf("new request, pwd: %s", pwd)

	// Hold the session's mutex so concurrent requests for the same
	// session do not interleave inside the record file.
	lock := r.sessionLock(sessionID)
	lock.Lock()
	defer lock.Unlock()

	name := strconv.FormatUint(sessionID, 10)
	// filepath.Join is correct whether or not sessionsDir ends in a
	// separator; the old string concatenation required a trailing "/".
	file, err := os.OpenFile(filepath.Join(r.sessionsDir, name), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		return err
	}
	// Close file on exit.
	defer file.Close()
	log.Printf("file name: %s", name)

	// write emits one record field directly to the file (fmt.Fprintf
	// instead of the old Write([]byte(fmt.Sprintf(...))) round trip),
	// logging failures with the same per-field messages as before.
	write := func(what, format string, args ...interface{}) {
		if _, err := fmt.Fprintf(file, format, args...); err != nil {
			log.Printf("can't write %s to file: %s", what, err)
		}
	}

	write("data", "<request>\n")
	write("timestamp", "<ts>%d</ts>\n", time.Now().UnixMilli())
	write("method", "<method>%s</method>\n", req.Method)
	write("url", "<url>%s</url>\n", req.URL.Path)
	if reqHeaders, err := json.Marshal(req.Header); err == nil {
		write("headers", "<headers>%s</headers>\n", reqHeaders)
	} else {
		log.Printf("can't marshal request headers: %s", err)
	}
	write("body", "<body>%s</body>\n", body)
	write("data", "</request>\n")

	// Flush the record to stable storage; failure is logged, not fatal.
	if err := file.Sync(); err != nil {
		log.Printf("can't sync file: %s", err)
	}
	return nil
}

// Recorder persists incoming HTTP requests grouped by session ID.
type Recorder interface {
	SaveRequest(sessionID uint64, req *http.Request, body []byte) error
}

// New returns a Recorder that stores one record file per session in
// dir. An empty dir defaults to the current working directory.
func New(dir string) Recorder {
	if dir == "" {
		dir = "./"
	}
	return &recorderImpl{
		sessions:    make(map[uint64]*sync.Mutex),
		sessionsDir: dir,
	}
}
|
||||
|
|
@ -1,54 +0,0 @@
|
|||
package postgres
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"github.com/jackc/pgx/v4"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type poolMock struct {
|
||||
//
|
||||
}
|
||||
|
||||
func (p poolMock) Query(sql string, args ...interface{}) (pgx.Rows, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (p poolMock) QueryRow(sql string, args ...interface{}) pgx.Row {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p poolMock) Exec(sql string, arguments ...interface{}) error {
|
||||
fmt.Println(sql)
|
||||
fmt.Println(arguments...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p poolMock) SendBatch(b *pgx.Batch) pgx.BatchResults {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p poolMock) Begin() (*_Tx, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (p poolMock) Close() {
|
||||
}
|
||||
|
||||
func NewPoolMock() Pool {
|
||||
return &poolMock{}
|
||||
}
|
||||
|
||||
func TestBulk(t *testing.T) {
|
||||
conn := NewPoolMock()
|
||||
bulk, err := NewBulk(conn, "autocomplete", "(value, type, project_id)", "($%d, $%d, $%d)", 3, 10)
|
||||
if err != nil {
|
||||
t.Errorf("can't create bulk: %s", err)
|
||||
}
|
||||
for i := 0; i < 10; i++ {
|
||||
if err := bulk.Append(fmt.Sprintf("var1+%d", i), fmt.Sprintf("var2+%d", i),
|
||||
i%2 == 0); err != nil {
|
||||
t.Errorf("can't add new values to bulk: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue