From 6d0dafe8c22699e4227c48c126b848ccee6d0c4b Mon Sep 17 00:00:00 2001
From: qydysky
Date: Tue, 5 Mar 2024 08:50:35 +0800
Subject: [PATCH] 1

---
 README.md      |  21 ++-
 config.go      | 339 +++++++++++++++++++++++++++++++++++++------------
 main.go        | 189 ++++-----------------
 main/main.go   |   2 +-
 main/main.json |  15 +--
 5 files changed, 307 insertions(+), 259 deletions(-)

diff --git a/README.md b/README.md
index 249431d..8870a66 100755
--- a/README.md
+++ b/README.md
@@ -22,19 +22,36 @@
   - path: string 路径
   - splicing: int 当客户端支持cookie时,将会固定使用后端多少秒
   - pathAdd: bool 将客户端访问的路径附加在path上 例:/api/req => /ws => /ws/api/req
-  - back: [] 后端, 可以动态增加/删除
+  - matchHeader: [] 将会附加到每个backs,匹配客户端请求头,只有都匹配才使用此后端, 可以动态增加/删除
+    - key: string 要匹配的header名
+    - matchExp: string 要匹配的正则式
+    - value: string 要匹配的值
+  - reqHeader: [] 将会附加到每个backs,请求后端时,请求头处理器, 可以动态增加/删除
+    - action: string 可选check、add、del、set。
+    - key: string 具体处理哪个头
+    - matchExp: string check时,如不匹配将结束请求。
+    - value: string check时,如不匹配将结束请求。add时将附加值。set时将覆盖值。
+  - resHeader: [] 将会附加到每个backs,返回后端的响应时,请求头处理器, 可以动态增加/删除
+    - action: string 可选check、add、del、set。
+    - key: string 具体处理哪个头
+    - matchExp: string check时,如不匹配将结束请求。
+    - value: string check时,如不匹配将结束请求。add时将附加值。set时将覆盖值。
+  - backs: [] 后端, 可以动态增加/删除
     - name: string 后端名称,将在日志中显示
    - to: string 后端地址,例"s://www.baidu.com",会根据客户端自动添加http or ws在地址前
    - weight: int 权重,按routes中的全部back的权重比分配,当权重变为0时,将停止新请求的进入
    - errBanSec: int 当后端错误时(指连接失败,不指后端错误响应),将会禁用若干秒
-    - matchHeader: [] 匹配客户端请求头,只有匹配才使用此后端, 可以动态增加/删除
+    - matchHeader: [] 匹配客户端请求头,只有都匹配才使用此后端, 可以动态增加/删除
      - key: string 要匹配的header名
+      - matchExp: string 要匹配的正则式
      - value: string 要匹配的值
    - reqHeader: [] 请求后端时,请求头处理器, 可以动态增加/删除
      - action: string 可选check、add、del、set。
      - key: string 具体处理哪个头
+      - matchExp: string check时,如不匹配将结束请求。
      - value: string check时,如不匹配将结束请求。add时将附加值。set时将覆盖值。
    - resHeader: [] 返回后端的响应时,请求头处理器, 可以动态增加/删除
      - action: string 可选check、add、del、set。
      - key: string 具体处理哪个头
+      - matchExp: string check时,如不匹配将结束请求。
      - value: string check时,如不匹配将结束请求。add时将附加值。set时将覆盖值。
diff --git a/config.go b/config.go
index aa5365c..37674ec 100755
--- a/config.go
+++ b/config.go
@@ -1,142 +1,318 @@
 package front
 
 import (
+    "context"
     "crypto/md5"
     "crypto/tls"
-    "encoding/json"
+    "errors"
     "fmt"
+    "math/rand/v2"
     "net/http"
+    "regexp"
     "sync"
     "time"
 
+    pctx "github.com/qydysky/part/ctx"
     pslice "github.com/qydysky/part/slice"
+    pweb "github.com/qydysky/part/web"
 )
 
 type Config struct {
-    lock sync.RWMutex
-    Sign []*Route `json:"-"`
-    Addr string `json:"addr"`
+    lock sync.RWMutex `json:"-"`
+    Addr string       `json:"addr"`
     TLS  struct {
-        Config *tls.Config `json:"-"`
-        Pub    string      `json:"pub"`
-        Key    string      `json:"key"`
+        Pub string `json:"pub"`
+        Key string `json:"key"`
     } `json:"tls"`
     MatchRule  string               `json:"matchRule"`
     CopyBlocks int                  `json:"copyBlocks"`
     BlocksI    pslice.BlocksI[byte] `json:"-"`
+    oldRoutes  []*Route             `json:"-"`
     Routes     []Route              `json:"routes"`
 }
 
-func (t *Config) SwapSign() []*Route {
-    var delRoute []*Route
-    for i := 0; i < len(t.Sign); i++ {
+func (t *Config) Run(ctx context.Context, logger Logger) {
+    ctx, done := pctx.WithWait(ctx, 0, time.Minute)
+    defer done()
+
+    routeP := pweb.WebPath{}
+
+    var matchfunc func(path string) (func(w http.ResponseWriter, r *http.Request), bool)
+    switch t.MatchRule {
+    case "all":
+        matchfunc = routeP.Load
+    default:
+        matchfunc = routeP.LoadPerfix
+    }
+
+    httpSer := http.Server{Addr: t.Addr}
+    if t.TLS.Key != "" && t.TLS.Pub != "" {
+        if cert, e := tls.LoadX509KeyPair(t.TLS.Pub, t.TLS.Key); e != nil {
+            logger.Error(`E:`, e)
+        } else {
+            httpSer.TLSConfig = &tls.Config{
+                Certificates: []tls.Certificate{cert},
+                NextProtos:   []string{"h2", "http/1.1"},
+            }
+        }
+    }
+    if t.BlocksI == nil {
+        if t.CopyBlocks == 0 {
+            t.CopyBlocks = 1000
+        }
+        t.BlocksI = pslice.NewBlocks[byte](16*1024, t.CopyBlocks)
+    }
+
+    syncWeb := pweb.NewSyncMap(&httpSer, &routeP, matchfunc)
+    defer syncWeb.Shutdown()
+
+    var addRoute = func(route *Route) {
+        logger.Info(`I:`, "路由加载", route.Path)
+        routeP.Store(route.Path, func(w http.ResponseWriter, r *http.Request) {
+            ctx1, done1 := pctx.WaitCtx(ctx)
+            defer done1()
+
+            var backIs []*Back
+            if t, e := r.Cookie("_psign_" + cookie); e == nil {
+                if backP, ok := route.backMap.Load(t.Value); ok && backP.(*Back).IsLive() && Matched(backP.(*Back).MatchHeader, r) {
+                    for i := 0; i < backP.(*Back).Weight; i++ {
+                        backIs = append(backIs, backP.(*Back))
+                    }
+                }
+            }
+
+            if len(backIs) == 0 {
+                backIs = append(backIs, route.FiliterBackByRequest(r)...)
+            }
+
+            if len(backIs) == 0 {
+                w.WriteHeader(http.StatusServiceUnavailable)
+                return
+            }
+
+            rand.Shuffle(len(backIs), func(i, j int) {
+                backIs[i], backIs[j] = backIs[j], backIs[i]
+            })
+
+            var e error
+            if r.Header.Get("Upgrade") == "websocket" {
+                e = wsDealer(ctx1, w, r, route.Path, backIs, logger, t.BlocksI)
+            } else {
+                e = httpDealer(ctx1, w, r, route.Path, backIs, logger, t.BlocksI)
+            }
+            if errors.Is(e, ErrHeaderCheckFail) {
+                w.WriteHeader(http.StatusForbidden)
+                return
+            }
+        })
+    }
+
+    var delRoute = func(route *Route) {
+        logger.Info(`I:`, "路由移除", route.Path)
+        routeP.Store(route.Path, nil)
+    }
+
+    t.SwapSign(addRoute, delRoute, logger)
+    logger.Info(`I:`, "启动完成")
+    for {
+        select {
+        case <-ctx.Done():
+            return
+        case <-time.After(time.Second * 10):
+            t.SwapSign(addRoute, delRoute, logger)
+        }
+    }
+}
+
+func (t *Config) SwapSign(add func(*Route), del func(*Route), logger Logger) {
+    for i := 0; i < len(t.oldRoutes); i++ {
         var exist bool
         for k := 0; k < len(t.Routes); k++ {
-            if t.Sign[i].Path == t.Routes[k].Path {
+            if t.oldRoutes[i].Path == t.Routes[k].Path {
                 exist = true
                 break
             }
         }
         if !exist {
-            delRoute = append(delRoute, t.Sign[i])
+            del(t.oldRoutes[i])
         }
     }
-    t.Sign = t.Sign[:0]
-
     for i := 0; i < len(t.Routes); i++ {
-        t.Sign = append(t.Sign, &t.Routes[i])
+        var exist bool
+        for k := 0; k < len(t.oldRoutes); k++ {
+            if t.Routes[i].Path == t.oldRoutes[k].Path {
+                exist = true
+                break
+            }
+        }
+        if !exist {
+            add(&t.Routes[i])
+        }
     }
-    return delRoute
+    t.oldRoutes = t.oldRoutes[:0]
+
+    for i := 0; i < len(t.Routes); i++ {
+        t.Routes[i].SwapSign(
+            func(b *Back) {
+                logger.Info(`I:`, "后端加载", t.Routes[i].Path, b.Name)
+                b.PathAdd = t.Routes[i].PathAdd
+                b.Splicing = t.Routes[i].Splicing
+                b.MatchHeader = append(b.MatchHeader, t.Routes[i].MatchHeader...)
+                b.ReqHeader = append(b.ReqHeader, t.Routes[i].ReqHeader...)
+                b.ResHeader = append(b.ResHeader, t.Routes[i].ResHeader...)
+                t.Routes[i].backMap.Store(b.Id(), b)
+            },
+            func(b *Back) {
+                logger.Info(`I:`, "后端移除", t.Routes[i].Path, b.Name)
+                t.Routes[i].backMap.Delete(b.Id())
+            },
+            logger,
+        )
+        t.oldRoutes = append(t.oldRoutes, &t.Routes[i])
+    }
 }
 
 type Route struct {
-    Path     string `json:"path"`
-    Sign     string `json:"-"`
-    Splicing int    `json:"splicing"`
-    PathAdd  bool   `json:"pathAdd"`
-    Back     []Back `json:"back"`
-}
+    Path string `json:"path"`
 
-func (t *Route) SwapSign() bool {
-    data, _ := json.Marshal(t)
-    w := md5.New()
-    w.Write(data)
-    sign := fmt.Sprintf("%x", w.Sum(nil))
-    if t.Sign != sign {
-        t.Sign = sign
-        return true
-    }
-    return false
+    Splicing    int      `json:"splicing"`
+    PathAdd     bool     `json:"pathAdd"`
+    MatchHeader []Header `json:"matchHeader"`
+    ReqHeader   []Header `json:"reqHeader"`
+    ResHeader   []Header `json:"resHeader"`
+
+    backMap sync.Map `json:"-"`
+    Backs   []Back   `json:"backs"`
 }
 
-func (t *Route) GenBack() []*Back {
-    var backLink []*Back
-    for i := 0; i < len(t.Back); i++ {
-        back := &t.Back[i]
-        back.SwapSign()
-        if back.Weight == 0 {
-            continue
+func (t *Route) SwapSign(add func(*Back), del func(*Back), logger Logger) {
+    logger.Info(t.Path)
+    t.backMap.Range(func(key, value any) bool {
+        var exist bool
+        for k := 0; k < len(t.Backs); k++ {
+            if key.(string) == t.Backs[k].Id() {
+                exist = true
+                break
+            }
         }
-        tmpBack := Back{
-            Name:        back.Name,
-            Splicing:    t.Splicing,
-            Sign:        back.Sign,
-            To:          back.To,
-            Weight:      back.Weight,
-            ErrBanSec:   back.ErrBanSec,
-            PathAdd:     t.PathAdd,
-            MatchHeader: append([]Header{}, back.MatchHeader...),
-            ReqHeader:   append([]Header{}, back.ReqHeader...),
-            ResHeader:   append([]Header{}, back.ResHeader...),
+        if !exist {
+            del(value.(*Back))
         }
-        for i := 1; i <= back.Weight; i++ {
-            backLink = append(backLink, &tmpBack)
+        return true
+    })
+
+    for i := 0; i < len(t.Backs); i++ {
+        if _, ok := t.backMap.Load(t.Backs[i].Id()); !ok {
+            add(&t.Backs[i])
         }
     }
-    return backLink
 }
 
-func FiliterBackByRequest(backs []*Back, r *http.Request) []*Back {
+// func (t *Route) GenBack() []*Back {
+//     var backLink []*Back
+//     for i := 0; i < len(t.Back); i++ {
+//         back := &t.Back[i]
+//         back.SwapSign()
+//         if back.Weight == 0 {
+//             continue
+//         }
+//         tmpBack := Back{
+//             Name:        back.Name,
+//             Splicing:    t.Splicing,
+//             Sign:        back.Sign,
+//             To:          back.To,
+//             Weight:      back.Weight,
+//             ErrBanSec:   back.ErrBanSec,
+//             PathAdd:     t.PathAdd,
+//             MatchHeader: append(t.MatchHeader, back.MatchHeader...),
+//             ReqHeader:   append(t.ReqHeader, back.ReqHeader...),
+//             ResHeader:   append(t.ResHeader, back.ResHeader...),
+//         }
+//         for i := 1; i <= back.Weight; i++ {
+//             backLink = append(backLink, &tmpBack)
+//         }
+//     }
+//     return backLink
+// }
+
+func (t *Route) FiliterBackByRequest(r *http.Request) []*Back {
     var backLink []*Back
-    for i := 0; i < len(backs); i++ {
-        matchs := len(backs[i].MatchHeader) - 1
-        for ; matchs >= 0 &&
-            r.Header.Get(backs[i].MatchHeader[matchs].Key) == backs[i].MatchHeader[matchs].Value; matchs -= 1 {
-        }
-        if matchs == -1 && backs[i].IsLive() {
-            backLink = append(backLink, backs[i])
+    for i := 0; i < len(t.Backs); i++ {
+        if t.Backs[i].IsLive() && Matched(t.Backs[i].MatchHeader, r) {
+            for k := 0; k < t.Backs[i].Weight; k++ {
+                backLink = append(backLink, &t.Backs[i])
+            }
         }
     }
     return backLink
 }
 
 type Back struct {
-    lock      sync.RWMutex
-    Sign      string `json:"-"`
-    Splicing  int    `json:"-"`
-    PathAdd   bool   `json:"-"`
-    upT       time.Time
-    Name      string `json:"name"`
-    To        string `json:"to"`
-    Weight    int    `json:"weight"`
-    ErrBanSec int    `json:"errBanSec"`
+    lock sync.RWMutex `json:"-"`
+    upT  time.Time    `json:"-"`
+
+    Name      string `json:"name"`
+    To        string `json:"to"`
+    Weight    int    `json:"weight"`
+    ErrBanSec int    `json:"errBanSec"`
+
+    Splicing int  `json:"-"`
+    PathAdd  bool `json:"-"`
     MatchHeader []Header `json:"matchHeader"`
     ReqHeader   []Header `json:"reqHeader"`
     ResHeader   []Header `json:"resHeader"`
 }
 
-func (t *Back) SwapSign() bool {
-    data, _ := json.Marshal(t)
+// func (t *Back) Init() (e error) {
+//     for i := 0; i < len(t.MatchHeader); i++ {
+//         e = t.MatchHeader[i].Init()
+//         if e != nil {
+//             return e
+//         }
+//     }
+//     for i := 0; i < len(t.ReqHeader); i++ {
+//         e = t.ReqHeader[i].Init()
+//         if e != nil {
+//             return e
+//         }
+//     }
+//     for i := 0; i < len(t.ResHeader); i++ {
+//         e = t.ResHeader[i].Init()
+//         if e != nil {
+//             return e
+//         }
+//     }
+//     return
+// }
+
+func (t *Back) Id() string {
     w := md5.New()
-    w.Write(data)
-    sign := fmt.Sprintf("%x", w.Sum(nil))
-    if t.Sign != sign {
-        t.Sign = sign
-        return true
+    w.Write([]byte(t.Name + t.To))
+    return fmt.Sprintf("%x", w.Sum(nil))
+}
+
+func Matched(matchHeader []Header, r *http.Request) bool {
+    matchs := len(matchHeader) - 1
+    for ; matchs >= 0; matchs -= 1 {
+        if !MatchedOne(matchHeader[matchs], r.Header.Get(matchHeader[matchs].Key)) {
+            break
+        }
+    }
+    return matchs == -1
+}
+
+func MatchedOne(matchHeader Header, value string) bool {
+    if matchHeader.Value != "" && value != matchHeader.Value {
+        return false
+    }
+    if matchHeader.MatchExp != "" {
+        if regexp, e := regexp.Compile(matchHeader.MatchExp); e != nil || !regexp.MatchString(value) {
+            return false
+        }
     }
-    return false
+    return true
 }
 
 func (t *Back) IsLive() bool {
@@ -155,7 +331,8 @@ func (t *Back) Disable() {
 }
 
 type Header struct {
-    Action string `json:"action"`
-    Key    string `json:"key"`
-    Value  string `json:"value"`
+    Action   string `json:"action"`
+    Key      string `json:"key"`
+    MatchExp string `json:"matchExp"`
+    Value    string `json:"value"`
 }
diff --git a/main.go b/main.go
index b3119b3..bdf875c 100755
--- a/main.go
+++ b/main.go
@@ -14,6 +14,7 @@ import (
     "net/http"
     "net/http/httptrace"
     "net/url"
+    "regexp"
     "strings"
     "time"
     _ "unsafe"
@@ -86,70 +87,6 @@ func Test(ctx context.Context, port int, logger Logger) {
 
 var cookie = fmt.Sprintf("%p", &struct{}{})
 
-// 转发
-func Run(ctx context.Context, configSP *Config, logger Logger) {
-    // 根ctx
-    ctx, cancle := pctx.WithWait(ctx, 0, time.Minute)
-    defer func() {
-        if errors.Is(cancle(), pctx.ErrWaitTo) {
-            logger.Error(`E:`, "退出超时")
-        }
-    }()
-
-    // 路由
-    routeP := pweb.WebPath{}
-
-    logger.Info(`I:`, "启动...")
-    defer logger.Info(`I:`, "退出,等待1min连接关闭...")
-
-    // config对象初次加载
-    if e := applyConfig(ctx, configSP, &routeP, logger); e != nil {
-        return
-    }
-
-    // matchfunc
-    var matchfunc func(path string) (func(w http.ResponseWriter, r *http.Request), bool)
-    switch configSP.MatchRule {
-    case "prefix":
-        logger.Info(`I:`, "匹配规则", "prefix")
-        matchfunc = routeP.LoadPerfix
-    case "all":
-        logger.Info(`I:`, "匹配规则", "all")
-        matchfunc = routeP.Load
-    default:
-        logger.Error(`E:`, "匹配规则", "无效")
-        return
-    }
-
-    httpSer := http.Server{
-        Addr: configSP.Addr,
-    }
-
-    if configSP.TLS.Config != nil {
-        httpSer.TLSConfig = configSP.TLS.Config.Clone()
-    }
-
-    if configSP.BlocksI == nil {
-        if configSP.CopyBlocks == 0 {
-            configSP.CopyBlocks = 1000
-        }
-        configSP.BlocksI = pslice.NewBlocks[byte](16*1024, configSP.CopyBlocks)
-    }
-
-    syncWeb := pweb.NewSyncMap(&httpSer, &routeP, matchfunc)
-    defer syncWeb.Shutdown()
-
-    // 定时加载config
-    for {
-        select {
-        case <-time.After(time.Second * 5):
-            _ = applyConfig(ctx, configSP, &routeP, logger)
-        case <-ctx.Done():
-            return
-        }
-    }
-}
-
 func loadConfig(buf []byte, configF File, configS *[]Config) error {
     if i, e := configF.Read(buf); e != nil && !errors.Is(e, io.EOF) {
         return e
@@ -163,18 +100,6 @@ func loadConfig(buf []byte, configF File, configS *[]Config) error {
         if e := json.Unmarshal(buf[:i], configS); e != nil {
             return e
         }
-        for i := 0; i < len((*configS)); i++ {
-            if (*configS)[i].TLS.Config == nil && (*configS)[i].TLS.Key != "" && (*configS)[i].TLS.Pub != "" {
-                if cert, e := tls.LoadX509KeyPair((*configS)[i].TLS.Pub, (*configS)[i].TLS.Key); e != nil {
-                    return e
-                } else {
-                    (*configS)[i].TLS.Config = &tls.Config{
-                        Certificates: []tls.Certificate{cert},
-                        NextProtos:   []string{"h2", "http/1.1"},
-                    }
-                }
-            }
-        }
     }
     return nil
 }
@@ -182,84 +107,6 @@ func loadConfig(buf []byte, configF File, configS *[]Config) error {
 //go:linkname nanotime1 runtime.nanotime1
 func nanotime1() int64
 
-func applyConfig(ctx context.Context, configS *Config, routeP *pweb.WebPath, logger Logger) error {
-    configS.lock.RLock()
-    defer configS.lock.RUnlock()
-
-    for _, v := range configS.SwapSign() {
-        logger.Info(`I:`, "路由移除", v.Path)
-        routeP.Store(v.Path, nil)
-        v.Sign = ""
-    }
-
-    for i := 0; i < len(configS.Routes); i++ {
-        route := &configS.Routes[i]
-        path := route.Path
-
-        if !route.SwapSign() {
-            continue
-        }
-
-        if len(route.Back) == 0 {
-            logger.Info(`I:`, "路由移除", path)
-            routeP.Store(path, nil)
-            continue
-        }
-
-        backArray := route.GenBack()
-
-        if len(backArray) == 0 {
-            logger.Info(`I:`, "路由移除", path)
-            routeP.Store(path, nil)
-            continue
-        }
-
-        backMap := make(map[string]*Back)
-
-        for i := 0; i < len(backArray); i++ {
-            backMap[backArray[i].Sign] = backArray[i]
-        }
-
-        logger.Info(`I:`, "路由更新", path)
-
-        routeP.Store(path, func(w http.ResponseWriter, r *http.Request) {
-            ctx1, done1 := pctx.WaitCtx(ctx)
-            defer done1()
-
-            var backIs []*Back
-            if validCookieDomain(r.Host) {
-                if t, e := r.Cookie("_psign_" + cookie); e == nil {
-                    if tmp, ok := backMap[t.Value]; ok {
-                        backIs = append(backIs, tmp)
-                    }
-                }
-            }
-
-            if len(backIs) == 0 {
-                backIs = append(backIs, FiliterBackByRequest(backArray, r)...)
-            }
-
-            if len(backIs) == 0 {
-                w.WriteHeader(http.StatusServiceUnavailable)
-                logger.Error(`W:`, fmt.Sprintf("%s=> 无可用后端", path))
-                return
-            }
-
-            var e error
-            if r.Header.Get("Upgrade") == "websocket" {
-                e = wsDealer(ctx1, w, r, path, backIs, logger, configS.BlocksI)
-            } else {
-                e = httpDealer(ctx1, w, r, path, backIs, logger, configS.BlocksI)
-            }
-            if errors.Is(e, ErrHeaderCheckFail) {
-                w.WriteHeader(http.StatusForbidden)
-                return
-            }
-        })
-    }
-    return nil
-}
-
 var (
     ErrRedirect = errors.New("ErrRedirect")
     ErrNoHttp   = errors.New("ErrNoHttp")
@@ -328,13 +175,17 @@ func httpDealer(ctx context.Context, w http.ResponseWriter, r *http.Request, rou
 
     logger.Debug(`T:`, fmt.Sprintf("http %s=>%s %v", routePath, chosenBack.Name, time.Since(opT)))
 
-    if validCookieDomain(r.Host) {
-        w.Header().Add("Set-Cookie", (&http.Cookie{
+    {
+        cookie := &http.Cookie{
             Name:   "_psign_" + cookie,
-            Value:  chosenBack.Sign,
+            Value:  chosenBack.Id(),
             MaxAge: chosenBack.Splicing,
-            Domain: r.Host,
-        }).String())
+            Path:   "/",
+        }
+        if validCookieDomain(r.Host) {
+            cookie.Domain = r.Host
+        }
+        w.Header().Add("Set-Cookie", (cookie).String())
     }
 
     w.Header().Add("_pto_"+cookie, chosenBack.Name)
@@ -409,13 +260,17 @@ func wsDealer(ctx context.Context, w http.ResponseWriter, r *http.Request, route
 
     logger.Debug(`T:`, fmt.Sprintf("ws %s=>%s %v", routePath, chosenBack.Name, time.Since(opT)))
 
-    if validCookieDomain(r.Host) {
-        w.Header().Add("Set-Cookie", (&http.Cookie{
+    {
+        cookie := &http.Cookie{
             Name:   "_psign_" + cookie,
-            Value:  chosenBack.Sign,
+            Value:  chosenBack.Id(),
            MaxAge: chosenBack.Splicing,
-            Domain: r.Host,
-        }).String())
+            Path:   "/",
+        }
+        if validCookieDomain(r.Host) {
+            cookie.Domain = r.Host
+        }
+        w.Header().Add("Set-Cookie", (cookie).String())
     }
 
     w.Header().Add("_pto_"+cookie, chosenBack.Name)
@@ -473,9 +328,13 @@ func copyHeader(s, t http.Header, app []Header) error {
     for _, v := range app {
         switch v.Action {
         case `check`:
-            if val := tm[v.Key]; val[0] != v.Value {
+            if !MatchedOne(v, tm[v.Key][0]) {
                 return ErrHeaderCheckFail
             }
+        case `replace`:
+            if va := t.Get(v.Key); va != "" {
+                t.Set(v.Key, regexp.MustCompile(v.MatchExp).ReplaceAllString(va, v.Value))
+            }
         case `set`:
             t.Set(v.Key, v.Value)
         case `add`:
diff --git a/main/main.go b/main/main.go
index 930b1b7..5508203 100755
--- a/main/main.go
+++ b/main/main.go
@@ -74,7 +74,7 @@ func main() {
     go pfront.Test(ctx, *testP, logger.Base("测试"))
 
     for i := 0; i < len(configS); i++ {
-        go pfront.Run(ctx, &configS[i], logger.Base(configS[i].Addr))
+        go configS[i].Run(ctx, logger.Base(configS[i].Addr))
     }
 
     // ctrl+c退出
diff --git a/main/main.json b/main/main.json
index fdef0b2..6375fb2 100755
--- a/main/main.json
+++ b/main/main.json
@@ -6,8 +6,9 @@
     "routes": [
         {
             "path": "/",
+            "splicing": 100,
             "pathAdd": true,
-            "back": [
+            "backs": [
                {
                    "name": "baidu1",
                    "to": "s://www.baidu.com",
@@ -17,17 +18,11 @@
                    "name": "baidu2",
                    "to": "s://www.baidu.com",
                    "weight": 1
-                }
-            ]
-        },
-        {
-            "path": "/s",
-            "pathAdd": true,
-            "back": [
+                },
                {
-                    "name": "baidus",
+                    "name": "baidu3",
                    "to": "s://www.baidu.com",
-                    "weight": 1
+                    "weight": 0
                }
            ]
        }
-- 
2.39.2
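
Illustrative sketch (not part of the patch): combining the route-level matchHeader / reqHeader / resHeader fields documented in the README hunk above, which Config.SwapSign appends onto every back. All paths, header names and backend addresses below are made-up placeholders.

    {
        "addr": "0.0.0.0:8081",
        "matchRule": "prefix",
        "routes": [
            {
                "path": "/api",
                "pathAdd": true,
                "splicing": 60,
                "matchHeader": [
                    { "key": "X-Client", "matchExp": "^(web|app)$" }
                ],
                "reqHeader": [
                    { "action": "check", "key": "Authorization", "matchExp": "^Bearer .+" },
                    { "action": "set", "key": "X-Proxy", "value": "front" }
                ],
                "resHeader": [
                    { "action": "del", "key": "Server" }
                ],
                "backs": [
                    { "name": "api1", "to": "s://api1.example.com", "weight": 2, "errBanSec": 30 },
                    { "name": "api2", "to": "s://api2.example.com", "weight": 1 }
                ]
            }
        ]
    }

With the code added in this patch, a back is only eligible when every matchHeader entry matches (matchExp is evaluated as a Go regular expression against the header value), a failed check action ends the request with 403 via ErrHeaderCheckFail, and a weight of 0 stops new requests from reaching that back.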