Compare commits
No commits in common. "feature/add-log-rotation" and "main" have entirely different histories.
feature/ad
...
main
|
@ -5,7 +5,6 @@ artifacts
|
|||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
coverage
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
|
27
Makefile
27
Makefile
|
@ -1,7 +1,6 @@
|
|||
# Inspired from https://dustinspecker.com/posts/go-combined-unit-integration-code-coverage/ and https://netdevops.me/2023/test-coverage-for-go-integration-tests/
|
||||
BIN_DIR = $(CURDIR)/artifacts
|
||||
BINARY = $(BIN_DIR)/gocustomurls
|
||||
COVERAGE_DIR = $(CURDIR)/coverage
|
||||
CURRENT_DIR = $(shell pwd)
|
||||
CUR_TAG = $(shell git tag | sort -g | tail -1 | cut -c 2-)
|
||||
VERSION_NUMBER ?= 0.0.0
|
||||
|
@ -31,29 +30,3 @@ lint:
|
|||
.PHONY: build
|
||||
build:
|
||||
go build -o $(BINARY)
|
||||
|
||||
.PHONY: build-debug
|
||||
build-debug:
|
||||
mkdir -p $(BIN_DIR)
|
||||
go build -cover -o $(BINARY) .
|
||||
|
||||
.PHONY: test
|
||||
test: build-debug
|
||||
rm -rf $(COVERAGE_DIR)
|
||||
mkdir -p $(COVERAGE_DIR)
|
||||
go test -cover ./... -args -test.gocoverdir="$(COVERAGE_DIR)"
|
||||
|
||||
.PHONY: coverage-full
|
||||
coverage-full: test
|
||||
go tool covdata textfmt -i=$(COVERAGE_DIR) -o $(COVERAGE_DIR)/coverage.out
|
||||
go tool cover -func=$(COVERAGE_DIR)/coverage.out
|
||||
|
||||
.PHONY: coverage-integration
|
||||
coverage-integration:
|
||||
go test ./... -run Integration -covermode=count -coverprofile=$(COVERAGE_DIR)/integration.out
|
||||
go tool cover -func=$(COVERAGE_DIR)/integration.out
|
||||
|
||||
.PHONY: coverage-html
|
||||
coverage-html: coverage-full
|
||||
go tool cover -html=./coverage/coverage.out -o ./coverage/coverage.html
|
||||
# open ./coverage/coverage.html
|
59
README.md
59
README.md
|
@ -15,17 +15,6 @@ There are ways to do this with [nginx](https://www.nirenjan.com/2019/golang-vani
|
|||
|
||||
So I used golang for this project.
|
||||
|
||||
## Running
|
||||
|
||||
```sh
|
||||
$ ./artifacts/gocustomurls --conf <PATH_TO_CONF_FILE>
|
||||
```
|
||||
A sample config file is located [here](./sample/config.json)
|
||||
|
||||
## SystemD
|
||||
|
||||
A sample systemd file is located [here](./sample/gocustomurls.service)
|
||||
|
||||
## Testing
|
||||
|
||||
You can test with
|
||||
|
@ -34,7 +23,7 @@ You can test with
|
|||
|
||||
```sh
|
||||
$ http --body "https://{domain.name}/{package}?go-get=1"
|
||||
...Truncated output
|
||||
...truncated output
|
||||
```
|
||||
|
||||
(b) With the go-get command
|
||||
|
@ -45,16 +34,44 @@ get "jbowen.dev/cereal": found meta tag get.metaImport{Prefix:"jbowen.dev/cereal
|
|||
jbowen.dev/cereal (download)
|
||||
```
|
||||
|
||||
### Sample logrotate config
|
||||
|
||||
Initial
|
||||
|
||||
```json
|
||||
~/.gocustomurls/logs/app.log {
|
||||
size 20M
|
||||
copytruncate
|
||||
compress
|
||||
notifempty
|
||||
delaycompress
|
||||
}
|
||||
```
|
||||
|
||||
To fix this error
|
||||
|
||||
```sh
|
||||
rotating pattern: /home/bloguser/.gocustomurls/logs/app.log forced from command line (no old logs will be kept)
|
||||
empty log files are not rotated, old logs are removed
|
||||
considering log /home/bloguser/.gocustomurls/logs/app.log
|
||||
error: skipping "/home/bloguser/.gocustomurls/logs/app.log" because parent directory has insecure permissions (It's world writable or writable by group which is not "root") Set "su" directive in config file to tell logrotate which user/group should be used for rotation.
|
||||
```
|
||||
|
||||
do this instead
|
||||
|
||||
```json
|
||||
~/.gocustomurls/logs/app.log {
|
||||
su bloguser bloguser
|
||||
size 20M
|
||||
copytruncate
|
||||
compress
|
||||
notifempty
|
||||
delaycompress
|
||||
}
|
||||
```
|
||||
|
||||
## TODOs
|
||||
|
||||
* [x] Fix permission errors around opening the app.log and rules.json.
|
||||
* [x] Make the flags (config, rules) required instead of optional.
|
||||
* [x] ~~Figure how to use logrotate (a linux utility)~~
|
||||
* [x] Figure how to do log rotation as part of this app's function
|
||||
* [x] Add tests
|
||||
* [ ] Add systemd.service and explanation
|
||||
* [ ] Add Dockerfile and explanation
|
||||
* [ ] Figure how to use `goreleaser` [here](https://nfpm.goreleaser.com/) to release deb and rpm packages (so basically split it, goreleaser for github and woodpecker for Woodpecker)
|
||||
* [ ] Add mirror to Github
|
||||
* [ ] Add Github action
|
||||
* [ ] Update README.md
|
||||
* [ ] Figure how to use logrotate (a linux utility)
|
10
app.go
10
app.go
|
@ -3,7 +3,6 @@ package main
|
|||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Application struct {
|
||||
|
@ -12,7 +11,7 @@ type Application struct {
|
|||
Log *LogFile
|
||||
}
|
||||
|
||||
func (app *Application) Routes() {
|
||||
func (app *Application) routes() {
|
||||
m := http.NewServeMux()
|
||||
|
||||
m.HandleFunc("/healthcheck", healthcheck)
|
||||
|
@ -23,10 +22,9 @@ func (app *Application) Routes() {
|
|||
}
|
||||
|
||||
func (app *Application) Setup(port string) *http.Server {
|
||||
app.Routes()
|
||||
app.routes()
|
||||
return &http.Server{
|
||||
Addr: fmt.Sprintf(":%s", port),
|
||||
Handler: app.Mux,
|
||||
ReadTimeout: 2500 * time.Millisecond,
|
||||
Addr: fmt.Sprintf(":%s", port),
|
||||
Handler: app.Mux,
|
||||
}
|
||||
}
|
||||
|
|
79
bytesize.go
79
bytesize.go
|
@ -1,79 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var units []string = []string{"KB", "MB", "GB", "B"}
|
||||
|
||||
// ByteSize represents a number of bytes
|
||||
type ByteSize struct {
|
||||
HumanRep string
|
||||
NumberRep int64
|
||||
}
|
||||
|
||||
// Byte size size suffixes.
|
||||
const (
|
||||
B int64 = 1
|
||||
KB int64 = 1 << (10 * iota)
|
||||
MB
|
||||
GB
|
||||
)
|
||||
|
||||
// Used to convert user input to ByteSize
|
||||
var unitMap = map[string]int64{
|
||||
"B": B,
|
||||
"KB": KB,
|
||||
"MB": MB,
|
||||
"GB": GB,
|
||||
}
|
||||
|
||||
// Converts string representaiion of a sistring representaiion of sizeze into ByteSize
|
||||
func (b *ByteSize) ParseFromString(s string) error {
|
||||
s = strings.TrimSpace(s)
|
||||
b.HumanRep = s
|
||||
var fragments []string
|
||||
unitFound := ""
|
||||
|
||||
for _, unit := range units {
|
||||
fragments = strings.Split(s, unit)
|
||||
if len(fragments) == 2 {
|
||||
unitFound = unit
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(unitFound) == 0 {
|
||||
return fmt.Errorf("unrecognized size suffix")
|
||||
}
|
||||
|
||||
value, err := strconv.ParseFloat(fragments[0], 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
unit, ok := unitMap[strings.ToUpper(unitFound)]
|
||||
if !ok {
|
||||
return fmt.Errorf("unrecognized size suffix %s", fragments[1])
|
||||
}
|
||||
|
||||
b.NumberRep = int64(value * float64(unit))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Converts a number of bytes into ByteSize
|
||||
func (b *ByteSize) ParseFromNumber(n int64) {
|
||||
b.NumberRep = n
|
||||
bf := float64(n)
|
||||
for _, unit := range []string{"", "K", "M", "G"} {
|
||||
if math.Abs(bf) < 1024.0 {
|
||||
b.HumanRep = fmt.Sprintf("%3.1f%sB", bf, unit)
|
||||
return
|
||||
}
|
||||
bf /= 1024.0
|
||||
}
|
||||
b.HumanRep = fmt.Sprintf("%.1fTB", bf)
|
||||
}
|
|
@ -1,46 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestParseFromString(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input1 string
|
||||
input2 int64
|
||||
}{
|
||||
"KB": {input1: "5.5KB", input2: 5632},
|
||||
"MB": {input1: "6.7MB", input2: 7025459},
|
||||
"GB": {input1: "7.5GB", input2: 8053063680},
|
||||
}
|
||||
|
||||
for name, tc := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
by := ByteSize{}
|
||||
err := by.ParseFromString(tc.input1)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.EqualValues(t, by.NumberRep, tc.input2)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseFromNumber(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input1 int64
|
||||
input2 string
|
||||
}{
|
||||
"KB": {input1: 528870, input2: "516.5KB"},
|
||||
"MB": {input1: 7025459, input2: "6.7MB"},
|
||||
"GB": {input1: 8053063680, input2: "7.5GB"},
|
||||
}
|
||||
|
||||
for name, tc := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
by := ByteSize{}
|
||||
by.ParseFromNumber(tc.input1)
|
||||
assert.EqualValues(t, by.HumanRep, tc.input2)
|
||||
})
|
||||
}
|
||||
}
|
118
conf.go
118
conf.go
|
@ -4,23 +4,11 @@ import (
|
|||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type ParsedConf struct {
|
||||
RulesFp string `json:"rulesPath"`
|
||||
LogFp string `json:"logPath"`
|
||||
Compression bool `json:"compress"`
|
||||
SizeToRotate string `json:"sizeToRotate"`
|
||||
Port string `json:"port"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
MappingFilePath string
|
||||
MappingRules ImportRulesMappings
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
type ImportRulesMappings struct {
|
||||
|
@ -74,109 +62,3 @@ func (c *Config) LoadMappingFile(fp string) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// generate default locations for all the config files
|
||||
func getDefaults() (map[string]string, error) {
|
||||
m := make(map[string]string)
|
||||
confDir, err := os.UserConfigDir()
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
homeDir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return m, err
|
||||
}
|
||||
m["rulesFp"] = filepath.Join(confDir, "gocustomcurls", "rules.json")
|
||||
m["confFp"] = filepath.Join(confDir, "gocustomcurls", "config.json")
|
||||
m["logFp"] = filepath.Join(homeDir, ".gocustomurls", "logs", "app.log")
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// generate and write to a location, the default values for the config.json file
|
||||
func generateDefaultConfigFile(defaultObj map[string]string) (ParsedConf, error) {
|
||||
var p ParsedConf
|
||||
var err error
|
||||
var defaults map[string]string
|
||||
if len(defaultObj) == 0 {
|
||||
defaults, err = getDefaults()
|
||||
} else {
|
||||
defaults = defaultObj
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
parentDir := filepath.Dir(defaults["confFp"])
|
||||
err = os.MkdirAll(parentDir, 0755)
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
f, err := os.OpenFile(defaults["confFp"], os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
defer f.Close()
|
||||
p.RulesFp = defaults["rulesFp"]
|
||||
p.LogFp = defaults["logFp"]
|
||||
p.Port = "7070"
|
||||
p.Compression = true
|
||||
p.SizeToRotate = "5MiB"
|
||||
jsonString, _ := json.Marshal(p)
|
||||
err = os.WriteFile(defaults["confFp"], jsonString, 0666)
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
|
||||
func checkIfSizeIsConfigured(fsize string) (bool, error) {
|
||||
suffixes := []string{"KB", "MB", "GB"}
|
||||
var found string
|
||||
for _, suffix := range suffixes {
|
||||
if strings.HasSuffix(fsize, suffix) {
|
||||
found = suffix
|
||||
}
|
||||
}
|
||||
if len(found) == 0 {
|
||||
return false, fmt.Errorf("%s has the incorrect suffix, Please use one of this suffixes {\"KB\", \"MB\", \"GB\"}", fsize)
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// load the main config file
|
||||
func (c *Config) LoadMainConfigFile(fp string) (ParsedConf, error) {
|
||||
var conf ParsedConf
|
||||
var err error
|
||||
c.Lock()
|
||||
defer c.Unlock()
|
||||
ok := isFile(fp)
|
||||
if !ok {
|
||||
// generate config file
|
||||
errorLog.Println("Warning, generating default config file")
|
||||
conf, err = generateDefaultConfigFile(map[string]string{})
|
||||
if err != nil {
|
||||
return conf, err
|
||||
}
|
||||
c.MappingFilePath = conf.RulesFp
|
||||
return conf, nil
|
||||
}
|
||||
f, err := os.Open(fp)
|
||||
if err != nil {
|
||||
return conf, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
err = json.NewDecoder(f).Decode(&conf)
|
||||
if err != nil {
|
||||
return conf, err
|
||||
}
|
||||
|
||||
_, err = checkIfSizeIsConfigured(conf.SizeToRotate)
|
||||
if err != nil {
|
||||
return conf, err
|
||||
}
|
||||
|
||||
c.MappingFilePath = conf.RulesFp
|
||||
return conf, nil
|
||||
}
|
||||
|
|
154
conf_test.go
154
conf_test.go
|
@ -1,154 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestLoadMappingFile(t *testing.T) {
|
||||
|
||||
t.Run("load the mapping file correctly - initial load", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/rules.json"
|
||||
|
||||
expected := fmt.Sprintf("%s/tmp/rules.json", tmpDir)
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, expected)
|
||||
|
||||
cfg := &Config{}
|
||||
|
||||
assert.Equal(t, cfg.MappingFilePath, "")
|
||||
assert.Empty(t, cfg.MappingRules)
|
||||
err := cfg.LoadMappingFile(expected)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.Equal(t, cfg.MappingFilePath, expected)
|
||||
assert.NotEmpty(t, cfg.MappingRules)
|
||||
})
|
||||
|
||||
t.Run("load the mapping file correctly - reload", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/rules.json"
|
||||
|
||||
expected := fmt.Sprintf("%s/tmp/rules.json", tmpDir)
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, expected)
|
||||
|
||||
cfg := &Config{}
|
||||
|
||||
assert.Equal(t, cfg.MappingFilePath, "")
|
||||
assert.Empty(t, cfg.MappingRules)
|
||||
err := cfg.LoadMappingFile(expected)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.Equal(t, cfg.MappingFilePath, expected)
|
||||
assert.NotEmpty(t, cfg.MappingRules)
|
||||
oldRules := cfg.MappingRules
|
||||
|
||||
rulesJsonFp = "testData/rules2.json"
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, expected)
|
||||
|
||||
err = cfg.LoadMappingFile(expected)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.Equal(t, cfg.MappingFilePath, expected)
|
||||
assert.NotEmpty(t, cfg.MappingRules)
|
||||
|
||||
newRules := cfg.MappingRules
|
||||
|
||||
assert.NotEqualValues(t, oldRules, newRules)
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetDefaults(t *testing.T) {
|
||||
actual, err := getDefaults()
|
||||
assert.Equal(t, err, nil)
|
||||
assert.Contains(t, actual, "rulesFp")
|
||||
assert.Contains(t, actual, "logFp")
|
||||
assert.Contains(t, actual, "confFp")
|
||||
}
|
||||
|
||||
func TestSizeIsConfigured(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
input string
|
||||
want bool
|
||||
}{
|
||||
"wrong input - K": {input: "5677.45K", want: false},
|
||||
"wrong input - KiB": {input: "5677.45KiB", want: false},
|
||||
"wrong input - M": {input: "9.45M", want: false},
|
||||
"wrong input - G": {input: "9.45G", want: false},
|
||||
"correct input - KB": {input: "5.45KB", want: true},
|
||||
"correct input - MB": {input: "5677.45MB", want: true},
|
||||
"correct input - GB": {input: "9.45GB", want: true},
|
||||
}
|
||||
|
||||
for name, tc := range tests {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
got, _ := checkIfSizeIsConfigured(tc.input)
|
||||
assert.Equal(t, got, tc.want)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateConfigFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tempLogsDir := fmt.Sprintf("%s/tmp/logs", tmpDir)
|
||||
tempConfigDir := fmt.Sprintf("%s/tmp/config", tmpDir)
|
||||
|
||||
mkDirForTest(t, tempLogsDir)
|
||||
mkDirForTest(t, tempConfigDir)
|
||||
|
||||
defaultConfig := map[string]string{
|
||||
"rulesFp": fmt.Sprintf("%s/rules.json", tempConfigDir),
|
||||
"confFp": fmt.Sprintf("%s/config.json", tempConfigDir),
|
||||
"logFp": fmt.Sprintf("%s/app.log", tempLogsDir),
|
||||
}
|
||||
|
||||
ok := IsDirEmpty(t, tempConfigDir)
|
||||
assert.Equal(t, ok, true)
|
||||
ok = IsDirEmpty(t, tempLogsDir)
|
||||
assert.Equal(t, ok, true)
|
||||
|
||||
expected, err := generateDefaultConfigFile(defaultConfig)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.NotEmpty(t, expected)
|
||||
|
||||
ok = IsDirEmpty(t, tempConfigDir)
|
||||
assert.Equal(t, ok, false)
|
||||
ok = IsDirEmpty(t, tempLogsDir)
|
||||
assert.Equal(t, ok, true)
|
||||
|
||||
}
|
||||
|
||||
func TestLoadMainConfigFileFromProvidedLocation(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tempConfigDir := fmt.Sprintf("%s/tmp", tmpDir)
|
||||
tempLogsDir := fmt.Sprintf("%s/tmp/logs", tmpDir)
|
||||
|
||||
mkDirForTest(t, tempConfigDir)
|
||||
|
||||
defaultConfigJson := map[string]any{
|
||||
"rulesPath": fmt.Sprintf("%s/rules.json", tempConfigDir),
|
||||
"logPath": fmt.Sprintf("%s/app.log", tempLogsDir),
|
||||
"port": "9005",
|
||||
"compress": false,
|
||||
"sizeToRotate": "50MB",
|
||||
}
|
||||
|
||||
tempConfigFile := fmt.Sprintf("%s/confg.json", tempConfigDir)
|
||||
|
||||
writeJsonForTest(t, defaultConfigJson, tempConfigFile)
|
||||
|
||||
cfg := &Config{}
|
||||
assert.Equal(t, cfg.MappingFilePath, "")
|
||||
conf, err := cfg.LoadMainConfigFile(tempConfigFile)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.NotEmpty(t, conf)
|
||||
assert.NotEmpty(t, cfg.MappingFilePath)
|
||||
}
|
4
embed.go
4
embed.go
|
@ -10,7 +10,7 @@ import (
|
|||
//go:embed templates/*
|
||||
var tmpls embed.FS
|
||||
|
||||
func getServeHtml() (*template.Template, error) {
|
||||
func GetServeHtml() (*template.Template, error) {
|
||||
var t *template.Template
|
||||
data, err := tmpls.ReadFile("templates/success.html")
|
||||
if err != nil {
|
||||
|
@ -19,7 +19,7 @@ func getServeHtml() (*template.Template, error) {
|
|||
return template.New("").Parse(string(data))
|
||||
}
|
||||
|
||||
func getDefaultHtml() ([]byte, error) {
|
||||
func GetDefaultHtml() ([]byte, error) {
|
||||
var data []byte
|
||||
var err error
|
||||
data, err = tmpls.ReadFile("templates/default.html")
|
||||
|
|
11
go.mod
11
go.mod
|
@ -1,14 +1,3 @@
|
|||
module gocustomurls
|
||||
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/stretchr/testify v1.9.0
|
||||
golang.org/x/net v0.26.0
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
|
12
go.sum
12
go.sum
|
@ -1,12 +0,0 @@
|
|||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
|
||||
golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
32
handlers.go
32
handlers.go
|
@ -3,7 +3,6 @@ package main
|
|||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
@ -16,7 +15,8 @@ func reloadRules(c *Config) http.HandlerFunc {
|
|||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
err := c.LoadMappingFile("")
|
||||
if err != nil {
|
||||
e := fmt.Errorf("cannot reload rules: %+v", err)
|
||||
e := fmt.Errorf("annot reload rules: %+v", err)
|
||||
// errorLog.Printf("Cannot reload rules: %+v", err)
|
||||
http.Error(w, e.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
@ -26,7 +26,6 @@ func reloadRules(c *Config) http.HandlerFunc {
|
|||
|
||||
func serveRules(c *Config) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
var nameOfPkg string
|
||||
if r.Method != http.MethodGet {
|
||||
http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusInternalServerError)
|
||||
return
|
||||
|
@ -34,7 +33,7 @@ func serveRules(c *Config) http.HandlerFunc {
|
|||
|
||||
// if go-get param is absent, return nothing
|
||||
if r.FormValue("go-get") != "1" {
|
||||
data, err := getDefaultHtml()
|
||||
data, err := GetDefaultHtml()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
|
@ -43,44 +42,25 @@ func serveRules(c *Config) http.HandlerFunc {
|
|||
return
|
||||
}
|
||||
|
||||
ipFragments := strings.Split(r.Host, ":")
|
||||
if len(ipFragments) > 1 {
|
||||
ipAddr := net.ParseIP(ipFragments[0])
|
||||
if ipAddr.IsLoopback() {
|
||||
nameOfPkg = ipAddr.String() + r.URL.Path
|
||||
}
|
||||
} else {
|
||||
nameOfPkg = r.Host + r.URL.Path
|
||||
}
|
||||
nameOfPkg := r.Host + r.URL.Path
|
||||
|
||||
var found bool
|
||||
var vanityUrl, proto, repoUrl string
|
||||
for _, rule := range c.MappingRules.Mappings {
|
||||
if strings.HasPrefix(strings.ToLower(rule.VanityUrl+"/"), strings.Trim(strings.ToLower(nameOfPkg), " ")) {
|
||||
vanityUrl = rule.VanityUrl
|
||||
repoUrl = rule.RealUrl
|
||||
proto = rule.Protocol
|
||||
found = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
data, err := getDefaultHtml()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
w.Write(data)
|
||||
return
|
||||
}
|
||||
|
||||
d := ImportRuleStruct{
|
||||
VanityUrl: vanityUrl,
|
||||
Proto: proto,
|
||||
RepoUrl: repoUrl,
|
||||
}
|
||||
tmpl, err := getServeHtml()
|
||||
tmpl, err := GetServeHtml()
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
|
|
263
handlers_test.go
263
handlers_test.go
|
@ -1,263 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
func TestIntegrationReloadRules(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping reload rules integration test")
|
||||
}
|
||||
|
||||
c := &Config{}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tempLogsDir := fmt.Sprintf("%s/tmp/logs", tmpDir)
|
||||
tempConfigDir := fmt.Sprintf("%s/tmp/config", tmpDir)
|
||||
|
||||
confFile := fmt.Sprintf("%s/config.json", tempConfigDir)
|
||||
rulesFile := fmt.Sprintf("%s/rules.json", tempConfigDir)
|
||||
lFp := fmt.Sprintf("%s/app.log", tempLogsDir)
|
||||
|
||||
mkDirForTest(t, tempConfigDir)
|
||||
mkDirForTest(t, tempLogsDir)
|
||||
|
||||
rulesJsonFp := "testData/rules.json"
|
||||
cpFileForTest(t, rulesJsonFp, rulesFile)
|
||||
|
||||
p := ParsedConf{
|
||||
RulesFp: rulesFile,
|
||||
LogFp: lFp,
|
||||
Port: "9050",
|
||||
Compression: true,
|
||||
SizeToRotate: "5MB",
|
||||
}
|
||||
jsonString, _ := json.Marshal(p)
|
||||
writeForTest(t, confFile, jsonString)
|
||||
|
||||
_, err := c.LoadMainConfigFile(confFile)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
l, err := newFileLogger(p.LogFp, p.SizeToRotate, p.Compression)
|
||||
assert.NotEmpty(t, l)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
isEmpty := isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
app := newTestApp(t, c, l)
|
||||
app.Routes()
|
||||
ts := newTestServer(t, app.Mux)
|
||||
defer ts.Close()
|
||||
|
||||
code, _, body := ts.get(t, "/reloadRules")
|
||||
assert.Equal(t, code, http.StatusOK)
|
||||
assert.Equal(t, string(body), "ok")
|
||||
|
||||
isEmpty = isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
oldRules := app.Config.MappingRules.Mappings
|
||||
assert.NotEmpty(t, oldRules)
|
||||
|
||||
nRulesJsonFp := "testData/rules2.json"
|
||||
cpFileForTest(t, nRulesJsonFp, rulesFile)
|
||||
|
||||
code, _, body = ts.get(t, "/reloadRules")
|
||||
assert.Equal(t, code, http.StatusOK)
|
||||
assert.Equal(t, string(body), "ok")
|
||||
|
||||
isEmpty = isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
newRules := app.Config.MappingRules.Mappings
|
||||
assert.NotEmpty(t, newRules)
|
||||
|
||||
assert.NotEqualValues(t, oldRules, newRules)
|
||||
}
|
||||
|
||||
func TestIntegrationServeRules(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("skipping serve rules integration test")
|
||||
}
|
||||
|
||||
t.Run("return default html if go-get is missing", func(t *testing.T) {
|
||||
c := &Config{}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tempLogsDir := fmt.Sprintf("%s/tmp/logs", tmpDir)
|
||||
tempConfigDir := fmt.Sprintf("%s/tmp/config", tmpDir)
|
||||
|
||||
confFile := fmt.Sprintf("%s/config.json", tempConfigDir)
|
||||
rulesFile := fmt.Sprintf("%s/rules.json", tempConfigDir)
|
||||
lFp := fmt.Sprintf("%s/app.log", tempLogsDir)
|
||||
|
||||
mkDirForTest(t, tempConfigDir)
|
||||
mkDirForTest(t, tempLogsDir)
|
||||
|
||||
rulesJsonFp := "testData/rules2.json"
|
||||
cpFileForTest(t, rulesJsonFp, rulesFile)
|
||||
|
||||
p := ParsedConf{
|
||||
RulesFp: rulesFile,
|
||||
LogFp: lFp,
|
||||
Port: "9050",
|
||||
Compression: true,
|
||||
SizeToRotate: "5MB",
|
||||
}
|
||||
jsonString, _ := json.Marshal(p)
|
||||
writeForTest(t, confFile, jsonString)
|
||||
|
||||
_, err := c.LoadMainConfigFile(confFile)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
err = c.LoadMappingFile(p.RulesFp)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
l, err := newFileLogger(p.LogFp, p.SizeToRotate, p.Compression)
|
||||
assert.NotEmpty(t, l)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
isEmpty := isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
app := newTestApp(t, c, l)
|
||||
app.Routes()
|
||||
|
||||
ts := newTestServer(t, app.Mux)
|
||||
defer ts.Close()
|
||||
|
||||
code, _, body := ts.get(t, "/touche")
|
||||
assert.Equal(t, code, http.StatusOK)
|
||||
expected, _ := getDefaultHtml()
|
||||
assert.Equal(t, string(body), string(expected))
|
||||
|
||||
isEmpty = isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
|
||||
})
|
||||
|
||||
t.Run("return normal html if go-get is included and the package exists", func(t *testing.T) {
|
||||
c := &Config{}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tempLogsDir := fmt.Sprintf("%s/tmp/logs", tmpDir)
|
||||
tempConfigDir := fmt.Sprintf("%s/tmp/config", tmpDir)
|
||||
|
||||
confFile := fmt.Sprintf("%s/config.json", tempConfigDir)
|
||||
rulesFile := fmt.Sprintf("%s/rules.json", tempConfigDir)
|
||||
lFp := fmt.Sprintf("%s/app.log", tempLogsDir)
|
||||
|
||||
mkDirForTest(t, tempConfigDir)
|
||||
mkDirForTest(t, tempLogsDir)
|
||||
|
||||
rulesJsonFp := "testData/rules3.json"
|
||||
cpFileForTest(t, rulesJsonFp, rulesFile)
|
||||
|
||||
p := ParsedConf{
|
||||
RulesFp: rulesFile,
|
||||
LogFp: lFp,
|
||||
Port: "9050",
|
||||
Compression: true,
|
||||
SizeToRotate: "5MB",
|
||||
}
|
||||
jsonString, _ := json.Marshal(p)
|
||||
writeForTest(t, confFile, jsonString)
|
||||
|
||||
_, err := c.LoadMainConfigFile(confFile)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
err = c.LoadMappingFile(p.RulesFp)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
l, err := newFileLogger(p.LogFp, p.SizeToRotate, p.Compression)
|
||||
assert.NotEmpty(t, l)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
isEmpty := isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
app := newTestApp(t, c, l)
|
||||
app.Routes()
|
||||
ts := newTestServer(t, app.Mux)
|
||||
defer ts.Close()
|
||||
|
||||
code, _, body := ts.get(t, "/x/touche?go-get=1")
|
||||
assert.Equal(t, code, http.StatusOK)
|
||||
assert.NotEmpty(t, string(body))
|
||||
|
||||
_, err = html.Parse(strings.NewReader(string(body)))
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
t.Logf("Printing returned html => %s", string(body))
|
||||
isEmpty = isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
})
|
||||
|
||||
t.Run("return default html if go-get is included and the package does not exists", func(t *testing.T) {
|
||||
c := &Config{}
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tempLogsDir := fmt.Sprintf("%s/tmp/logs", tmpDir)
|
||||
tempConfigDir := fmt.Sprintf("%s/tmp/config", tmpDir)
|
||||
|
||||
confFile := fmt.Sprintf("%s/config.json", tempConfigDir)
|
||||
rulesFile := fmt.Sprintf("%s/rules.json", tempConfigDir)
|
||||
lFp := fmt.Sprintf("%s/app.log", tempLogsDir)
|
||||
|
||||
mkDirForTest(t, tempConfigDir)
|
||||
mkDirForTest(t, tempLogsDir)
|
||||
|
||||
rulesJsonFp := "testData/rules3.json"
|
||||
cpFileForTest(t, rulesJsonFp, rulesFile)
|
||||
|
||||
p := ParsedConf{
|
||||
RulesFp: rulesFile,
|
||||
LogFp: lFp,
|
||||
Port: "9050",
|
||||
Compression: true,
|
||||
SizeToRotate: "5MB",
|
||||
}
|
||||
jsonString, _ := json.Marshal(p)
|
||||
writeForTest(t, confFile, jsonString)
|
||||
|
||||
_, err := c.LoadMainConfigFile(confFile)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
err = c.LoadMappingFile(p.RulesFp)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
l, err := newFileLogger(p.LogFp, p.SizeToRotate, p.Compression)
|
||||
assert.NotEmpty(t, l)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
isEmpty := isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
app := newTestApp(t, c, l)
|
||||
app.Routes()
|
||||
ts := newTestServer(t, app.Mux)
|
||||
defer ts.Close()
|
||||
|
||||
code, _, body := ts.get(t, "/x/fuckoff?go-get=1")
|
||||
assert.Equal(t, code, http.StatusOK)
|
||||
expected, _ := getDefaultHtml()
|
||||
assert.Equal(t, string(body), string(expected))
|
||||
|
||||
isEmpty = isFileEmpty(t, lFp)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
|
||||
t.Logf("Printing returned html => %s", string(body))
|
||||
})
|
||||
}
|
205
logger.go
205
logger.go
|
@ -1,17 +1,12 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
|
@ -45,13 +40,9 @@ var (
|
|||
)
|
||||
|
||||
type LogFile struct {
|
||||
handle *os.File
|
||||
logger *log.Logger
|
||||
path string
|
||||
fileLock sync.Mutex
|
||||
canCompress bool
|
||||
maxSize ByteSize
|
||||
curSize ByteSize
|
||||
handle *os.File
|
||||
logger *log.Logger
|
||||
path string
|
||||
}
|
||||
|
||||
type LogFileRec struct {
|
||||
|
@ -60,173 +51,28 @@ type LogFileRec struct {
|
|||
Url string `json:"url"`
|
||||
}
|
||||
|
||||
func (lf *LogFile) MakeCopyTo(dst string) error {
|
||||
var err error
|
||||
r, err := os.Open(lf.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer r.Close()
|
||||
|
||||
w, err := os.OpenFile(dst, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if c := w.Close(); err == nil {
|
||||
err = c
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = io.Copy(w, r)
|
||||
return err
|
||||
}
|
||||
|
||||
func (lf *LogFile) Truncate() error {
|
||||
fd, err := os.OpenFile(lf.path, os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not open file %q for truncation: %v", lf.path, err)
|
||||
}
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not close file handler for %q after truncation: %v", lf.path, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func compressOldFile(fname string) error {
|
||||
reader, err := os.Open(fname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("compressOldFile: failed to open existing file %s: %w", fname, err)
|
||||
}
|
||||
defer reader.Close()
|
||||
|
||||
buffer := bufio.NewReader(reader)
|
||||
fnameGz := fname + ".gz"
|
||||
fw, err := os.OpenFile(fnameGz, os.O_WRONLY|os.O_CREATE, 0666)
|
||||
if err != nil {
|
||||
return fmt.Errorf("compressOldFile: failed to open new file %s: %w", fnameGz, err)
|
||||
}
|
||||
defer fw.Close()
|
||||
|
||||
zw, err := gzip.NewWriterLevel(fw, gzip.BestCompression)
|
||||
if err != nil {
|
||||
return fmt.Errorf("compressOldFile: failed to create gzip writer: %w", err)
|
||||
}
|
||||
defer zw.Close()
|
||||
|
||||
_, err = buffer.WriteTo(zw)
|
||||
if err != nil {
|
||||
_ = zw.Close()
|
||||
_ = fw.Close()
|
||||
return fmt.Errorf("compressOldFile: failed to write to gz file: %w", err)
|
||||
}
|
||||
_ = reader.Close()
|
||||
|
||||
err = os.Remove(fname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("compressOldFile: failed to delete old file: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lf *LogFile) Rotate() error {
|
||||
|
||||
// new file
|
||||
newFilePrefix := fmt.Sprintf("%s.%s", lf.handle.Name(), time.Now().Format("2006-01-02"))
|
||||
|
||||
// close file to allow for read-only access
|
||||
err := lf.handle.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// make a copy of the old log file
|
||||
err = lf.MakeCopyTo(newFilePrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// compress the new log file
|
||||
if lf.canCompress {
|
||||
err = compressOldFile(newFilePrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Truncate the old log file
|
||||
err = lf.Truncate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(lf.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lf.handle = f
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lf *LogFile) Open() error {
|
||||
lf.fileLock.Lock()
|
||||
defer lf.fileLock.Unlock()
|
||||
f, err := os.OpenFile(lf.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lf.handle = f
|
||||
finfo, err := f.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
curSize := finfo.Size()
|
||||
if lf.maxSize.NumberRep != 0 && curSize >= lf.maxSize.NumberRep {
|
||||
err = lf.Rotate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
lf.logger = log.New(f, "", 0)
|
||||
finfo, err = lf.handle.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
by := ByteSize{}
|
||||
by.ParseFromNumber(finfo.Size())
|
||||
lf.curSize = by
|
||||
return nil
|
||||
}
|
||||
|
||||
func newFileLogger(path string, maxSize string, canCompress bool) (*LogFile, error) {
|
||||
func newFileLogger(path string) (*LogFile, error) {
|
||||
requestedFile := filepath.Clean(filepath.Join("/", path))
|
||||
parentDir := filepath.Dir(requestedFile)
|
||||
err := os.MkdirAll(parentDir, 0755)
|
||||
err := os.MkdirAll(parentDir, 0777)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
by := ByteSize{}
|
||||
err = by.ParseFromString(maxSize)
|
||||
f, err := os.OpenFile(requestedFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
lf := &LogFile{
|
||||
path: path,
|
||||
canCompress: canCompress,
|
||||
maxSize: by,
|
||||
}
|
||||
err = lf.Open()
|
||||
return lf, err
|
||||
return &LogFile{
|
||||
handle: f,
|
||||
logger: log.New(f, "", 0),
|
||||
path: path,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *LogFile) Close() error {
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
f.fileLock.Lock()
|
||||
defer f.fileLock.Unlock()
|
||||
err := f.handle.Close()
|
||||
f.handle = nil
|
||||
return err
|
||||
|
@ -290,13 +136,10 @@ func getCurrentDate() string {
|
|||
return dt.Format(time.RFC3339Nano)
|
||||
}
|
||||
|
||||
func (lf *LogFile) WriteLog(r *http.Request) error {
|
||||
if lf == nil {
|
||||
func (f *LogFile) WriteLog(r *http.Request) error {
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
lf.fileLock.Lock()
|
||||
defer lf.fileLock.Unlock()
|
||||
|
||||
var rec = make(map[string]string)
|
||||
rec["method"] = r.Method
|
||||
rec["requestUri"] = r.RequestURI
|
||||
|
@ -314,26 +157,6 @@ func (lf *LogFile) WriteLog(r *http.Request) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
lf.logger.Println(string(b))
|
||||
|
||||
finfo, err := lf.handle.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
curSize := finfo.Size()
|
||||
|
||||
if lf.maxSize.NumberRep != 0 && curSize > lf.maxSize.NumberRep {
|
||||
err = lf.Rotate()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
finfo, err = lf.handle.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
by := ByteSize{}
|
||||
by.ParseFromNumber(finfo.Size())
|
||||
lf.curSize = by
|
||||
f.logger.Println(string(b))
|
||||
return nil
|
||||
}
|
||||
|
|
257
logger_test.go
257
logger_test.go
|
@ -1,257 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTruncate(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/app_over_size.log"
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, tmpLf)
|
||||
|
||||
lf := &LogFile{
|
||||
path: tmpLf,
|
||||
}
|
||||
|
||||
lf.path = tmpLf
|
||||
// Continue from here
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
|
||||
err := lf.Truncate()
|
||||
assert.Equal(t, err, nil)
|
||||
isEmpty = isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
}
|
||||
|
||||
func TestMakeCopyTo(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/app_over_size.log"
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, tmpLf)
|
||||
|
||||
newLocation := fmt.Sprintf("%s/tmp/app.1.log", tmpDir)
|
||||
|
||||
lf := &LogFile{
|
||||
path: tmpLf,
|
||||
}
|
||||
|
||||
lf.path = tmpLf
|
||||
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
|
||||
err := lf.MakeCopyTo(newLocation)
|
||||
assert.Equal(t, err, nil)
|
||||
isEmpty = isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
ok := areFilesTheSame(t, lf.path, newLocation)
|
||||
assert.Equal(t, ok, true)
|
||||
}
|
||||
|
||||
func TestCompressFile(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/app_over_size.log"
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, tmpLf)
|
||||
|
||||
expected := fmt.Sprintf("%s/tmp/app.log.gz", tmpDir)
|
||||
|
||||
err := compressOldFile(tmpLf)
|
||||
assert.Equal(t, err, nil)
|
||||
exists := doesFileExist(tmpLf)
|
||||
assert.Equal(t, exists, false)
|
||||
exists = doesFileExist(expected)
|
||||
assert.Equal(t, exists, true)
|
||||
}
|
||||
|
||||
func TestRotate(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/app_over_size.log"
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, tmpLf)
|
||||
|
||||
lf := &LogFile{
|
||||
path: tmpLf,
|
||||
canCompress: true,
|
||||
}
|
||||
|
||||
fd, _ := os.Open(tmpLf)
|
||||
lf.handle = fd
|
||||
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
err := lf.Rotate()
|
||||
assert.Equal(t, err, nil)
|
||||
exists := doesFileExist(tmpLf)
|
||||
assert.Equal(t, exists, true)
|
||||
isEmpty = isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
expected := walkMatch(t, tmpDir, "*.gz")
|
||||
assert.NotEmpty(t, expected)
|
||||
t.Logf("%+v", expected)
|
||||
|
||||
assert.NotEqual(t, lf.handle, fd)
|
||||
}
|
||||
|
||||
func TestNewLogger(t *testing.T) {
|
||||
t.Run("load logging file - do not Rotate", func(t *testing.T) {
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/app_under_size.log"
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
|
||||
cpFileForTest(t, rulesJsonFp, tmpLf)
|
||||
|
||||
lf, err := newFileLogger(tmpLf, "6KB", true)
|
||||
expected := walkMatch(t, tmpDir, "*.gz")
|
||||
assert.Equal(t, err, nil)
|
||||
assert.Empty(t, expected)
|
||||
assert.Equal(t, lf.path, tmpLf)
|
||||
assert.NotEmpty(t, lf.handle)
|
||||
assert.NotEmpty(t, lf.logger)
|
||||
|
||||
assert.FileExists(t, tmpLf)
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
})
|
||||
|
||||
t.Run("load logging file - Rotate", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/app_over_size.log"
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
cpFileForTest(t, rulesJsonFp, tmpLf)
|
||||
|
||||
lf, err := newFileLogger(tmpLf, "4KB", true)
|
||||
expected := walkMatch(t, tmpDir, "*.gz")
|
||||
assert.Equal(t, err, nil)
|
||||
assert.NotEmpty(t, expected)
|
||||
assert.Equal(t, lf.path, tmpLf)
|
||||
assert.NotEmpty(t, lf.handle)
|
||||
assert.NotEmpty(t, lf.logger)
|
||||
|
||||
assert.FileExists(t, tmpLf)
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
})
|
||||
|
||||
t.Run("create new logging file", func(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
|
||||
lf, err := newFileLogger(tmpLf, "4KB", true)
|
||||
|
||||
expected := walkMatch(t, tmpDir, "*.gz")
|
||||
assert.Equal(t, err, nil)
|
||||
assert.Empty(t, expected)
|
||||
assert.Equal(t, lf.path, tmpLf)
|
||||
assert.NotEmpty(t, lf.handle)
|
||||
assert.NotEmpty(t, lf.logger)
|
||||
|
||||
assert.FileExists(t, tmpLf)
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
})
|
||||
}
|
||||
|
||||
func TestWriteLog(t *testing.T) {
|
||||
t.Run("write to logging file - do not Rotate", func(t *testing.T) {
|
||||
req := httptest.NewRequest(http.MethodGet, "/{package}?go-get=1", nil)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
|
||||
lf, err := newFileLogger(tmpLf, "4KB", true)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.FileExists(t, tmpLf)
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
err = lf.WriteLog(req)
|
||||
assert.Equal(t, err, nil)
|
||||
|
||||
expected := walkMatch(t, tmpDir, "*.gz")
|
||||
assert.Empty(t, expected)
|
||||
|
||||
isEmpty = isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
|
||||
b := readTestFile(t, tmpLf)
|
||||
m := make(map[string]string)
|
||||
_ = json.Unmarshal(b, &m)
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
assert.Contains(t, keys, "requestUri")
|
||||
assert.Contains(t, keys, "Host")
|
||||
assert.Contains(t, keys, "method")
|
||||
assert.Contains(t, keys, "ipAddr")
|
||||
assert.Contains(t, keys, "requestDate")
|
||||
})
|
||||
|
||||
t.Run("write to logging file - Rotate", func(t *testing.T) {
|
||||
req := httptest.NewRequest(http.MethodGet, "/{package}?go-get=1", nil)
|
||||
|
||||
tmpDir := t.TempDir()
|
||||
|
||||
mkDirForTest(t, fmt.Sprintf("%s/tmp", tmpDir))
|
||||
rulesJsonFp := "testData/app_under_size.log"
|
||||
|
||||
tmpLf := fmt.Sprintf("%s/tmp/app.log", tmpDir)
|
||||
cpFileForTest(t, rulesJsonFp, tmpLf)
|
||||
|
||||
lf, err := newFileLogger(tmpLf, "5KB", true)
|
||||
assert.Equal(t, err, nil)
|
||||
assert.FileExists(t, tmpLf)
|
||||
isEmpty := isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, false)
|
||||
|
||||
err = lf.WriteLog(req)
|
||||
assert.Equal(t, err, nil)
|
||||
err = lf.WriteLog(req)
|
||||
assert.Equal(t, err, nil)
|
||||
// t.Logf("%s\n", lf.curSize)
|
||||
|
||||
expected := walkMatch(t, tmpDir, "*.gz")
|
||||
assert.NotEmpty(t, expected)
|
||||
|
||||
assert.FileExists(t, tmpLf)
|
||||
isEmpty = isFileEmpty(t, tmpLf)
|
||||
assert.Equal(t, isEmpty, true)
|
||||
|
||||
})
|
||||
}
|
75
main.go
75
main.go
|
@ -9,6 +9,7 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
@ -31,6 +32,29 @@ func flagsSet(flags *flag.FlagSet) map[string]bool {
|
|||
return s
|
||||
}
|
||||
|
||||
// generateDefaults - generate the default values for the rules.json and log files
|
||||
func generateDefaults(rulesfp string, logfp string) (string, string, error) {
|
||||
var newlogfp, newrulesfp string
|
||||
var err error
|
||||
newlogfp = logfp
|
||||
newrulesfp = rulesfp
|
||||
if len(newrulesfp) == 0 {
|
||||
dir, err := os.UserConfigDir()
|
||||
if err != nil {
|
||||
return newrulesfp, newlogfp, err
|
||||
}
|
||||
newrulesfp = filepath.Join(dir, "gocustomcurls", "rules.json")
|
||||
}
|
||||
if len(newlogfp) == 0 {
|
||||
dir, err := os.UserHomeDir()
|
||||
if err != nil {
|
||||
return newrulesfp, newlogfp, err
|
||||
}
|
||||
newlogfp = filepath.Join(dir, ".gocustomurls", "logs", "app.log")
|
||||
}
|
||||
return newrulesfp, newlogfp, err
|
||||
}
|
||||
|
||||
// isValidPort returns true if the port is valid
|
||||
// following the RFC https://datatracker.ietf.org/doc/html/rfc6056#section-2.1
|
||||
func isValidPort(port int) bool {
|
||||
|
@ -50,8 +74,9 @@ func main() {
|
|||
flags.PrintDefaults()
|
||||
}
|
||||
|
||||
confFlag := flags.String("conf", "", "Required. Contains all the configurations options")
|
||||
|
||||
portFlag := flags.String("port", "7070", "Optional. Default port is 7070. Port to listen to")
|
||||
rulesFileFlag := flags.String("config", "", "Optional. Contains go-import mapping")
|
||||
logFileFlag := flags.String("logfile", "", "Optional. Default log file")
|
||||
flags.Parse(os.Args[1:])
|
||||
|
||||
if len(flags.Args()) > 1 {
|
||||
|
@ -62,23 +87,14 @@ func main() {
|
|||
|
||||
allSetFlags := flagsSet(flags)
|
||||
|
||||
if !allSetFlags["conf"] {
|
||||
errorLog.Println("Error: conf arguments must be set")
|
||||
flags.Usage()
|
||||
os.Exit(1)
|
||||
var port string
|
||||
if allSetFlags["port"] {
|
||||
port = *portFlag
|
||||
} else {
|
||||
port = "7070"
|
||||
}
|
||||
|
||||
// TODO: Use only one flag conf with a conf file that
|
||||
// contains the following configuration, port, logfile, rulesfile, sizeofRotation
|
||||
conf := *confFlag
|
||||
c := &Config{}
|
||||
pConf, err := c.LoadMainConfigFile(conf)
|
||||
if err != nil {
|
||||
errorLog.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
p, err := strconv.Atoi(pConf.Port)
|
||||
p, err := strconv.Atoi(port)
|
||||
if err != nil {
|
||||
errorLog.Println(err)
|
||||
os.Exit(1)
|
||||
|
@ -89,12 +105,31 @@ func main() {
|
|||
os.Exit(1)
|
||||
}
|
||||
|
||||
err = c.LoadMappingFile(pConf.RulesFp)
|
||||
var rulesFile string
|
||||
|
||||
if allSetFlags["config"] {
|
||||
rulesFile = *rulesFileFlag
|
||||
}
|
||||
|
||||
var logFile string
|
||||
if allSetFlags["logFile"] {
|
||||
logFile = *logFileFlag
|
||||
}
|
||||
|
||||
rFile, lFile, err := generateDefaults(logFile, rulesFile)
|
||||
if err != nil {
|
||||
errorLog.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
l, err := newFileLogger(pConf.LogFp, pConf.SizeToRotate, pConf.Compression)
|
||||
|
||||
// load rules mapping
|
||||
c := &Config{}
|
||||
err = c.LoadMappingFile(rFile)
|
||||
if err != nil {
|
||||
errorLog.Println(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
l, err := newFileLogger(lFile)
|
||||
if err != nil {
|
||||
errorLog.Println(err)
|
||||
os.Exit(1)
|
||||
|
@ -104,7 +139,7 @@ func main() {
|
|||
Config: c,
|
||||
Log: l,
|
||||
}
|
||||
srv := app.Setup(pConf.Port)
|
||||
srv := app.Setup(port)
|
||||
|
||||
// For graceful shutdowns
|
||||
go func() {
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
{
|
||||
"rulesPath":"HOME/rules.json",
|
||||
"logPath":"HOME/app.log",
|
||||
"compress":false,
|
||||
"sizeToRotate":"1kB",
|
||||
"port":"9999"
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
[Unit]
|
||||
Description=A custom url mapper for go packages!
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
ExecStart=/opt/gocustomurls --conf $HOME/app.conf
|
||||
Restart=always
|
||||
SyslogIdentifier=gocustomurls
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
|
@ -1,66 +0,0 @@
|
|||
Stopped serving new connections.
|
||||
Graceful shutdown complete.
|
||||
Stopped serving new connections.
|
||||
Graceful shutdown complete.
|
||||
gocustomurls: Starting
|
||||
Stopped serving new connections.
|
||||
Graceful shutdown complete.
|
||||
gocustomurls: Starting
|
||||
Stopped serving new connections.
|
||||
Graceful shutdown complete.
|
||||
2024-05-28T14:29:35.767371211-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42408","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T14:33:59.491487682-04:00 Stopped serving new connections.
|
||||
2024-05-28T14:33:59.491691358-04:00 Graceful shutdown complete.
|
||||
2024-05-28T14:48:29.818981344-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:39974","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T15:17:42.692146237-04:00 Stopped serving new connections.
|
||||
2024-05-28T15:17:42.692268027-04:00 Graceful shutdown complete.
|
||||
2024-05-28T15:18:11.625875598-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:38550","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:49398","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T15:40:09.870951532-04:00 Stopped serving new connections.
|
||||
2024-05-28T15:40:09.871085499-04:00 Graceful shutdown complete.
|
||||
2024-05-28T15:40:24.154320042-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:58664","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T16:52:38.449545322-04:00 Stopped serving new connections.
|
||||
2024-05-28T16:52:38.449782355-04:00 Graceful shutdown complete.
|
||||
2024-05-28T16:52:47.19006644-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:35120","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T16:54:39.537910226-04:00 Stopped serving new connections.
|
||||
2024-05-28T16:54:39.538042526-04:00 Graceful shutdown complete.
|
||||
2024-05-28T16:54:43.991755643-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42694","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:00:37.549759919-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:00:37.549895593-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:00:39.564002213-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:40190","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:01:53.902149407-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:01:53.902326423-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:01:55.853543897-04:00 Starting
|
||||
2024-05-28T17:01:57.187935277-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:01:57.188065162-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:02:02.342827445-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:55876","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:04:36.661802337-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:04:36.661918402-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:04:42.004255484-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42466","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:13:11.636985677-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:13:11.637150614-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:13:19.225477323-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:35882","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:13:37.590697118-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:13:37.59075443-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:14:30.964387887-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:35074","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:14:40.412222772-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:14:40.412407892-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:19:09.579082129-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:52204","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:19:32.491870213-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:19:32.492012026-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:20:12.700323661-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42938","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T18:08:26.484225491-04:00 Stopped serving new connections.
|
||||
2024-05-28T18:08:26.484389145-04:00 Graceful shutdown complete.
|
|
@ -1,56 +0,0 @@
|
|||
2024-05-28T14:29:35.767371211-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42408","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T14:33:59.491487682-04:00 Stopped serving new connections.
|
||||
2024-05-28T14:33:59.491691358-04:00 Graceful shutdown complete.
|
||||
2024-05-28T14:48:29.818981344-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:39974","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T15:17:42.692146237-04:00 Stopped serving new connections.
|
||||
2024-05-28T15:17:42.692268027-04:00 Graceful shutdown complete.
|
||||
2024-05-28T15:18:11.625875598-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:38550","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:49398","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T15:40:09.870951532-04:00 Stopped serving new connections.
|
||||
2024-05-28T15:40:09.871085499-04:00 Graceful shutdown complete.
|
||||
2024-05-28T15:40:24.154320042-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:58664","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T16:52:38.449545322-04:00 Stopped serving new connections.
|
||||
2024-05-28T16:52:38.449782355-04:00 Graceful shutdown complete.
|
||||
2024-05-28T16:52:47.19006644-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:35120","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T16:54:39.537910226-04:00 Stopped serving new connections.
|
||||
2024-05-28T16:54:39.538042526-04:00 Graceful shutdown complete.
|
||||
2024-05-28T16:54:43.991755643-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42694","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:00:37.549759919-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:00:37.549895593-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:00:39.564002213-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:40190","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:01:53.902149407-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:01:53.902326423-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:01:55.853543897-04:00 Starting
|
||||
2024-05-28T17:01:57.187935277-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:01:57.188065162-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:02:02.342827445-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:55876","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:04:36.661802337-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:04:36.661918402-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:04:42.004255484-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42466","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:13:11.636985677-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:13:11.637150614-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:13:19.225477323-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:35882","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:13:37.590697118-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:13:37.59075443-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:14:30.964387887-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:35074","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:14:40.412222772-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:14:40.412407892-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:19:09.579082129-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:52204","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T17:19:32.491870213-04:00 Stopped serving new connections.
|
||||
2024-05-28T17:19:32.492012026-04:00 Graceful shutdown complete.
|
||||
2024-05-28T17:20:12.700323661-04:00 Starting
|
||||
{"Accept":"*/*","Accept-Encoding":"gzip, deflate","Host":"localhost:7070","User-Agent":"HTTPie/3.2.1","ipAddr":"[::1]:42938","method":"GET","requestUri":"/x/touche?go-get=1"}
|
||||
2024-05-28T18:08:26.484225491-04:00 Stopped serving new connections.
|
||||
2024-05-28T18:08:26.484389145-04:00 Graceful shutdown complete.
|
|
@ -1,14 +0,0 @@
|
|||
{
|
||||
"mappings": [
|
||||
{
|
||||
"vanity_url":"scale.dev/x/migrate",
|
||||
"protocol":"git",
|
||||
"real_url":"https://github.com/scale/migrate"
|
||||
},
|
||||
{
|
||||
"vanity_url":"localhost:7070/x/touche",
|
||||
"protocol":"git",
|
||||
"real_url":"https://github.com/mine/touche"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
{
|
||||
"mappings": [
|
||||
{
|
||||
"vanity_url":"scale.dev/x/migrate",
|
||||
"protocol":"git",
|
||||
"real_url":"https://github.com/scale/migrate"
|
||||
},
|
||||
{
|
||||
"vanity_url":"codeberg.org/woodpecker-plugins/plugin-gitea-release",
|
||||
"protocol":"git",
|
||||
"real_url":"https://codeberg.org/woodpecker-plugins/gitea-release"
|
||||
},
|
||||
{
|
||||
"vanity_url":"localhost:7070/x/touche",
|
||||
"protocol":"git",
|
||||
"real_url":"https://github.com/mine/touche"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
{
|
||||
"mappings": [
|
||||
{
|
||||
"vanity_url":"codeberg.org/woodpecker-plugins/plugin-gitea-release",
|
||||
"protocol":"svn",
|
||||
"real_url":"https://codeberg.org/woodpecker-plugins/gitea-release"
|
||||
},
|
||||
{
|
||||
"vanity_url":"127.0.0.1/x/touche",
|
||||
"protocol":"git",
|
||||
"real_url":"https://github.com/mine/touche"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,260 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func mkDirForTest(t *testing.T, fp string) {
|
||||
err := os.MkdirAll(fp, os.ModePerm)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func cpFileForTest(t *testing.T, src string, dst string) {
|
||||
var srcfd *os.File
|
||||
var dstfd *os.File
|
||||
var err error
|
||||
var srcinfo os.FileInfo
|
||||
srcfd, err = os.Open(src)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return
|
||||
} else {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
// if err != nil {
|
||||
// t.Fatal(err)
|
||||
// }
|
||||
defer srcfd.Close()
|
||||
dstfd, err = os.Create(dst)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer dstfd.Close()
|
||||
_, err = io.Copy(dstfd, srcfd)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
srcinfo, err = os.Stat(src)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = os.Chmod(dst, srcinfo.Mode())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func writeForTest(t *testing.T, fp string, data []byte) {
|
||||
err := os.WriteFile(fp, data, 0666)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func IsDirEmpty(t *testing.T, name string) bool {
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// read in ONLY one file
|
||||
_, err = f.Readdir(1)
|
||||
|
||||
// and if the file is EOF... well, the dir is empty.
|
||||
return err == io.EOF
|
||||
}
|
||||
|
||||
func doesFileExist(name string) bool {
|
||||
_, err := os.ReadFile(name)
|
||||
// defer fp.Close()
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Derived from here (https://stackoverflow.com/a/55300382)
|
||||
func walkMatch(t *testing.T, root, pattern string) []string {
|
||||
var matches []string
|
||||
err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
|
||||
return err
|
||||
} else if matched {
|
||||
matches = append(matches, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
// func doesFileExist(name string) bool {
|
||||
// _, err := os.Stat(name)
|
||||
// return !errors.Is(err, fs.ErrNotExist)
|
||||
// }
|
||||
|
||||
func removeFileForTest(t *testing.T, name string) {
|
||||
err := os.Remove(name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func writeJsonForTest(t *testing.T, data map[string]any, fp string) {
|
||||
jsonString, _ := json.Marshal(data)
|
||||
err := os.WriteFile(fp, jsonString, os.ModePerm)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func isFileEmpty(t *testing.T, name string) bool {
|
||||
fd, err := os.Open(name)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer fd.Close()
|
||||
finfo, err := fd.Stat()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return finfo.Size() < 1
|
||||
}
|
||||
|
||||
// Derived from here (https://stackoverflow.com/a/73411967)
//
// areFilesTheSame reports whether fp_1 and fp_2 have identical contents.
// It short-circuits on metadata (same underlying file, differing sizes)
// before falling back to a chunked byte comparison. Any I/O error fails
// the test immediately.
func areFilesTheSame(t *testing.T, fp_1 string, fp_2 string) bool {
	// Compare in 4 KiB chunks to bound memory use on large files.
	chunkSize := 4 * 1024

	// shortcuts: check file metadata
	finfo_1, err := os.Stat(fp_1)
	if err != nil {
		t.Fatal(err)
	}

	finfo_2, err := os.Stat(fp_2)
	if err != nil {
		t.Fatal(err)
	}

	// are the inputs literally the same file (same underlying inode)?
	if os.SameFile(finfo_1, finfo_2) {
		return true
	}

	// do inputs at least have the same size?
	if finfo_1.Size() != finfo_2.Size() {
		return false
	}

	// long way: compare contents
	fd_1, err := os.Open(fp_1)
	if err != nil {
		t.Fatal(err)
	}

	defer fd_1.Close()

	fd_2, err := os.Open(fp_2)
	if err != nil {
		t.Fatal(err)
	}
	defer fd_2.Close()

	bfd_1 := make([]byte, chunkSize)
	bfd_2 := make([]byte, chunkSize)
	for {
		// ReadFull fills the buffer unless the file ends first: it returns
		// io.EOF when nothing was read, and io.ErrUnexpectedEOF on a short
		// final chunk. Because the sizes were proven equal above, the two
		// reads stay in lockstep (n1 == n2 and matching EOF kinds).
		n1, err1 := io.ReadFull(fd_1, bfd_1)
		n2, err2 := io.ReadFull(fd_2, bfd_2)

		// https://pkg.go.dev/io#Reader
		// > Callers should always process the n > 0 bytes returned
		// > before considering the error err. Doing so correctly
		// > handles I/O errors that happen after reading some bytes
		// > and also both of the allowed EOF behaviors.

		// Compare whatever was read BEFORE looking at the errors,
		// per the io.Reader contract quoted above.
		if !bytes.Equal(bfd_1[:n1], bfd_2[:n2]) {
			return false
		}

		// Both files ended on the same chunk boundary (full or partial):
		// every byte matched, so the files are identical.
		if (err1 == io.EOF && err2 == io.EOF) || (err1 == io.ErrUnexpectedEOF && err2 == io.ErrUnexpectedEOF) {
			return true
		}

		// some other error, like a dropped network connection or a bad transfer
		if err1 != nil {
			t.Fatal(err1)
		}
		if err2 != nil {
			t.Fatal(err2)
		}
	}
}
|
||||
|
||||
func readTestFile(t *testing.T, fp string) []byte {
|
||||
f, err := os.ReadFile(fp)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// func writeTestConfFile(t *testing.T, rulesPath string, logPath string) {
|
||||
|
||||
// writeJsonForTest(t, p, )
|
||||
|
||||
// }
|
||||
|
||||
// Integration tests
|
||||
|
||||
func newTestApp(t *testing.T, cfg *Config, lfg *LogFile) *Application {
|
||||
return &Application{
|
||||
Config: cfg,
|
||||
Log: lfg,
|
||||
}
|
||||
}
|
||||
|
||||
// testServer embeds httptest.Server so integration-test helpers
// (e.g. the get method) can be attached to the running test server.
type testServer struct {
	*httptest.Server
}
|
||||
|
||||
func newTestServer(t *testing.T, h http.Handler) *testServer {
|
||||
ts := httptest.NewServer(h)
|
||||
return &testServer{ts}
|
||||
}
|
||||
|
||||
func (ts *testServer) get(t *testing.T, urlPath string) (int, http.Header, []byte) {
|
||||
rs, err := ts.Client().Get(ts.URL + urlPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
defer rs.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(rs.Body)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
body = bytes.TrimSpace(body)
|
||||
return rs.StatusCode, rs.Header, body
|
||||
}
|
Loading…
Reference in New Issue