// gocustomurls/logger.go

package main

import (
"bufio"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"log"
"math"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"time"
)
// some headers not worth logging
var (
hdrsToNotLog = []string{
"Accept-Language",
"Cache-Control",
"Cf-Ray",
"CF-Visitor",
"CF-Connecting-IP",
"Cdn-Loop",
"Cookie",
"Connection",
"Dnt",
"If-Modified-Since",
"Sec-Fetch-Dest",
"Sec-Ch-Ua-Mobile",
// "Sec-Ch-Ua",
"Sec-Ch-Ua-Platform",
"Sec-Fetch-Site",
"Sec-Fetch-Mode",
"Sec-Fetch-User",
"Upgrade-Insecure-Requests",
"X-Request-Start",
"X-Forwarded-For",
"X-Forwarded-Proto",
"X-Forwarded-Host",
}
hdrsToNotLogMap map[string]bool
)
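// LogFile is a JSON-lines request logger backed by a single file on disk.
// fileLock serializes writes, rotation, and Close.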
type LogFile struct {
handle *os.File
logger *log.Logger
path string
fileLock sync.Mutex
}
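// LogFileRec models the core fields of a logged request.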
type LogFileRec struct {
Method string `json:"method"`
IpAddr string `json:"ipAddr"`
Url string `json:"url"`
}
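// makeCopyTo appends the current contents of the log file to dst, creating
// dst if it does not exist. The error from closing dst is propagated through
// the named return value.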
func (lf *LogFile) makeCopyTo(dst string) (err error) {
r, err := os.Open(lf.path)
if err != nil {
return err
}
defer r.Close()
w, err := os.OpenFile(dst, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return err
}
defer func() {
if c := w.Close(); err == nil {
err = c
}
}()
_, err = io.Copy(w, r)
return err
}
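// truncate empties the log file in place without changing its path.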
func (lf *LogFile) truncate() error {
	fd, err := os.OpenFile(lf.path, os.O_WRONLY|os.O_TRUNC, 0666)
if err != nil {
return fmt.Errorf("could not open file %q for truncation: %v", lf.path, err)
}
err = fd.Close()
if err != nil {
return fmt.Errorf("could not close file handler for %q after truncation: %v", lf.path, err)
}
return nil
}
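// prettyByteSize formats a byte count as a human-readable string,
// e.g. 1536 -> "1.5KB", scaling through K/M/G/T/P and falling back to EB.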
func prettyByteSize(b int64) string {
bf := float64(b)
for _, unit := range []string{"", "K", "M", "G", "T", "P"} {
if math.Abs(bf) < 1024.0 {
return fmt.Sprintf("%3.1f%sB", bf, unit)
}
bf /= 1024.0
}
return fmt.Sprintf("%.1fEB", bf)
}
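// compressOldFile gzips fname into fname.gz using best compression and
// deletes the original once the compressed copy has been written and closed.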
func compressOldFile(fname string) error {
reader, err := os.Open(fname)
if err != nil {
return fmt.Errorf("compressOldFile: failed to open existing file %s: %w", fname, err)
}
defer reader.Close()
buffer := bufio.NewReader(reader)
fnameGz := fname + ".gz"
	fw, err := os.OpenFile(fnameGz, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
return fmt.Errorf("compressOldFile: failed to open new file %s: %w", fnameGz, err)
}
defer fw.Close()
zw, err := gzip.NewWriterLevel(fw, gzip.BestCompression)
if err != nil {
return fmt.Errorf("compressOldFile: failed to create gzip writer: %w", err)
}
defer zw.Close()
	_, err = buffer.WriteTo(zw)
	if err != nil {
		return fmt.Errorf("compressOldFile: failed to write to gz file: %w", err)
	}
	// flush and close the gzip stream before deleting the original,
	// so an unflushed write cannot silently lose data
	if err = zw.Close(); err != nil {
		return fmt.Errorf("compressOldFile: failed to finalize gz file: %w", err)
	}
	if err = fw.Close(); err != nil {
		return fmt.Errorf("compressOldFile: failed to close new file %s: %w", fnameGz, err)
	}
	_ = reader.Close()
	err = os.Remove(fname)
	if err != nil {
		return fmt.Errorf("compressOldFile: failed to delete old file: %w", err)
	}
	return nil
}
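// rotate copies the current log file to a date-suffixed sibling (optionally
// gzip-compressing the copy), truncates the live file, and reopens it so
// lf.handle and lf.logger point at the fresh file.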
func (lf *LogFile) rotate(canCompress bool) error {
lf.fileLock.Lock()
defer lf.fileLock.Unlock()
	// dated name for the rotated copy
	newFilePrefix := fmt.Sprintf("%s.%s", lf.path, time.Now().Format("2006-01-02"))
	// close the current handle so the copy and truncation see flushed data
	var err error
	if lf.handle != nil {
		err = lf.handle.Close()
		if err != nil {
			return err
		}
	}
// make a copy of the old log file
err = lf.makeCopyTo(newFilePrefix)
if err != nil {
return err
}
// compress the new log file
if canCompress {
err = compressOldFile(newFilePrefix)
if err != nil {
return err
}
}
// truncate the old log file
err = lf.truncate()
if err != nil {
return err
}
f, err := os.OpenFile(lf.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return err
}
	lf.handle = f
	lf.logger = log.New(f, "", 0)
return nil
}
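// open opens (or creates) the log file for appending and attaches the logger.
// If maxSize is non-empty and the current size, formatted by prettyByteSize,
// compares greater than maxSize, the file is rotated; note this is a lexical
// string comparison, not a numeric one.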
func (lf *LogFile) open(maxSize string, canCompress bool) error {
f, err := os.OpenFile(lf.path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
if err != nil {
return err
}
	finfo, err := f.Stat()
	if err != nil {
		f.Close()
		return err
	}
	lf.handle = f
	lf.logger = log.New(f, "", 0)
	curSize := prettyByteSize(finfo.Size())
	if len(strings.TrimSpace(maxSize)) != 0 && curSize > maxSize {
		err = lf.rotate(canCompress)
		if err != nil {
			return err
		}
	}
	return nil
}
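// newFileLogger creates the parent directory of path if needed and returns a
// LogFile ready for WriteLog, honoring the maxSize/canCompress rotation
// settings on any existing file.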
func newFileLogger(path string, maxSize string, canCompress bool) (*LogFile, error) {
	// make sure the parent directory of the log file exists
	parentDir := filepath.Dir(filepath.Clean(path))
	err := os.MkdirAll(parentDir, 0755)
	if err != nil {
		return nil, err
	}
	lf := &LogFile{
		path: path,
	}
	err = lf.open(maxSize, canCompress)
	if err != nil {
		return nil, err
	}
	return lf, nil
}
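// Close closes the underlying file handle; it is safe to call on a nil
// receiver or after the handle has already been closed.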
func (f *LogFile) Close() error {
if f == nil {
return nil
}
f.fileLock.Lock()
defer f.fileLock.Unlock()
	if f.handle == nil {
		return nil
	}
	err := f.handle.Close()
f.handle = nil
return err
}
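// extractFirstFragment returns the first comma-separated value of the named
// header, trimmed of surrounding whitespace.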
func extractFirstFragment(header *http.Header, headerName string) string {
s := header.Get(headerName)
if len(strings.TrimSpace(s)) == 0 {
return s
}
fragments := strings.Split(s, ",")
return strings.TrimSpace(fragments[0])
}
// extractIpAddress returns the client IP address, preferring common proxy
// headers before falling back to Request.RemoteAddr.
func extractIpAddress(r *http.Request) string {
var ipAddr string
if r == nil {
return ""
}
possibleIpHeaders := []string{"CF-Connecting-IP", "X-Real-Ip", "X-Forwarded-For"}
for _, header := range possibleIpHeaders {
ipAddr = extractFirstFragment(&r.Header, header)
if len(strings.TrimSpace(ipAddr)) != 0 {
return ipAddr
}
}
	// fall back to Request.RemoteAddr, stripping the trailing ":port" if present
	if len(strings.TrimSpace(r.RemoteAddr)) != 0 {
		index := strings.LastIndex(r.RemoteAddr, ":")
if index == -1 {
return r.RemoteAddr
}
ipAddr = r.RemoteAddr[:index]
}
return ipAddr
}
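// canSkipExtraHeaders reports whether the extra request headers can be
// skipped because the Referer already points at this Host, i.e. a same-site
// request.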
func canSkipExtraHeaders(r *http.Request) bool {
ref := r.Header.Get("Referer")
if len(strings.TrimSpace(ref)) == 0 {
return false
}
return strings.Contains(ref, r.Host)
}
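// shouldLogHeader reports whether the header name s is worth logging, i.e.
// it is not in the hdrsToNotLog skip list. The skip-list lookup map is built
// lazily on the first call.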
func shouldLogHeader(s string) bool {
if hdrsToNotLogMap == nil {
hdrsToNotLogMap = map[string]bool{}
for _, h := range hdrsToNotLog {
h = strings.ToLower(h)
hdrsToNotLogMap[h] = true
}
}
s = strings.ToLower(s)
return !hdrsToNotLogMap[s]
}
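// getCurrentDate returns the current time formatted as RFC 3339 with
// nanosecond precision, used as the requestDate field of a log record.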
func getCurrentDate() string {
dt := time.Now()
return dt.Format(time.RFC3339Nano)
}
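// WriteLog appends one JSON object describing the request to the log file:
// method, requestUri, Host, ipAddr, requestDate and, unless the request is a
// same-site referral, the first value of each header not in the skip list.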
func (f *LogFile) WriteLog(r *http.Request) error {
if f == nil {
return nil
}
f.fileLock.Lock()
defer f.fileLock.Unlock()
var rec = make(map[string]string)
rec["method"] = r.Method
rec["requestUri"] = r.RequestURI
rec["Host"] = r.Host
rec["ipAddr"] = extractIpAddress(r)
rec["requestDate"] = getCurrentDate()
if !canSkipExtraHeaders(r) {
for key, val := range r.Header {
if shouldLogHeader(key) && len(val) > 0 {
rec[key] = val[0]
}
}
}
b, err := json.Marshal(rec)
if err != nil {
return err
}
f.logger.Println(string(b))
return nil
}