Diffstat (limited to 'modules/caddyhttp/reverseproxy')
-rwxr-xr-x  modules/caddyhttp/reverseproxy/healthchecker.go    64
-rwxr-xr-x  modules/caddyhttp/reverseproxy/module.go            46
-rwxr-xr-x  modules/caddyhttp/reverseproxy/reverseproxy.go     562
-rwxr-xr-x  modules/caddyhttp/reverseproxy/upstream.go          413
4 files changed, 1085 insertions, 0 deletions
diff --git a/modules/caddyhttp/reverseproxy/healthchecker.go b/modules/caddyhttp/reverseproxy/healthchecker.go
new file mode 100755
index 0000000..add3aa0
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/healthchecker.go
@@ -0,0 +1,64 @@
+package reverseproxy
+
+import (
+ "net/http"
+ "time"
+)
+
+// Upstream represents the interface that must be satisfied to use the healthchecker.
+type Upstream interface {
+ SetHealthiness(bool)
+}
+
+// HealthChecker represents a worker that periodically evaluates whether a proxy upstream host is healthy.
+type HealthChecker struct {
+ upstream Upstream
+ Ticker *time.Ticker
+ HTTPClient *http.Client
+}
+
+// ScheduleChecks periodically runs health checks against an upstream host.
+func (h *HealthChecker) ScheduleChecks(url string) {
+ // check whether the host is healthy immediately on start instead of waiting for the first tick
+ h.upstream.SetHealthiness(h.IsHealthy(url))
+
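+ // NOTE: h.Ticker.C is re-read on each loop iteration (a select inside a for loop,
+ // rather than ranging over the channel) because SetHealthiness may replace the ticker.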
+ for {
+ select {
+ case <-h.Ticker.C:
+ h.upstream.SetHealthiness(h.IsHealthy(url))
+ }
+ }
+}
+
+// Stop stops the healthchecker from making further requests.
+func (h *HealthChecker) Stop() {
+ h.Ticker.Stop()
+}
+
+// IsHealthy attempts to check if an upstream host is healthy.
+func (h *HealthChecker) IsHealthy(url string) bool {
+ req, err := http.NewRequest(http.MethodGet, url, nil)
+ if err != nil {
+ return false
+ }
+
+ resp, err := h.HTTPClient.Do(req)
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+
+ return resp.StatusCode >= 200 && resp.StatusCode < 400
+}
+
+// NewHealthCheckWorker returns a new instance of a HealthChecker.
+func NewHealthCheckWorker(u Upstream, interval time.Duration, client *http.Client) *HealthChecker {
+ return &HealthChecker{
+ upstream: u,
+ Ticker: time.NewTicker(interval),
+ HTTPClient: client,
+ }
+}
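
A minimal sketch of how this worker is meant to be driven from within the package
(the loggingUpstream type is hypothetical and for illustration only; assume "log",
"net/http", and "time" are imported):

    type loggingUpstream struct{}

    func (u *loggingUpstream) SetHealthiness(ok bool) {
        log.Printf("upstream healthy: %v", ok)
    }

    func ExampleHealthChecker() {
        hc := NewHealthCheckWorker(&loggingUpstream{}, 30*time.Second, http.DefaultClient)
        go hc.ScheduleChecks("http://localhost:8080/health")
        defer hc.Stop()
        // ... serve traffic; the worker flips healthiness as checks pass or fail ...
    }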
diff --git a/modules/caddyhttp/reverseproxy/module.go b/modules/caddyhttp/reverseproxy/module.go
new file mode 100755
index 0000000..cc53bf5
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/module.go
@@ -0,0 +1,46 @@
+package reverseproxy
+
+import (
+ "fmt"
+
+ "bitbucket.org/lightcodelabs/caddy2"
+)
+
+// Register caddy module.
+func init() {
+ caddy2.RegisterModule(caddy2.Module{
+ Name: "http.responders.reverse_proxy",
+ New: func() (interface{}, error) { return new(LoadBalanced), nil },
+ OnLoad: func(instances []interface{}, _ interface{}) (interface{}, error) {
+ // we don't need to do anything with prior state because healthcheckers are
+ // cleaned up in OnUnload.
+ s := &State{
+ HealthCheckers: []*HealthChecker{},
+ }
+
+ for _, i := range instances {
+ lb := i.(*LoadBalanced)
+
+ err := NewLoadBalancedReverseProxy(lb, s)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return s, nil
+ },
+ OnUnload: func(state interface{}) error {
+ s, ok := state.(*State)
+ if !ok {
+ return fmt.Errorf("proxy OnLoad: prior state not expected proxy.State type")
+ }
+
+ // cleanup old healthcheckers
+ for _, hc := range s.HealthCheckers {
+ hc.Stop()
+ }
+
+ return nil
+ },
+ })
+}
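
For reference, a sketch of the JSON config this responder accepts, with field names
taken from the struct tags in upstream.go below; the host and durations here are
placeholder values:

    {
        "try_interval": "20s",
        "load_balance_type": "round_robin",
        "no_healthy_upstreams_message": "No healthy upstreams.",
        "upstreams": [{
            "host": "http://localhost:8080",
            "health_check_duration": "30s",
            "fast_health_check_duration": "1s",
            "health_check_path": "/health"
        }]
    }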
diff --git a/modules/caddyhttp/reverseproxy/reverseproxy.go b/modules/caddyhttp/reverseproxy/reverseproxy.go
new file mode 100755
index 0000000..e5020e2
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/reverseproxy.go
@@ -0,0 +1,562 @@
+package reverseproxy
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http/httpguts"
+)
+
+// ReverseProxy is an HTTP Handler that takes an incoming request and
+// sends it to another server, proxying the response back to the
+// client.
+type ReverseProxy struct {
+ // Director must be a function which modifies
+ // the request into a new request to be sent
+ // using Transport. Its response is then copied
+ // back to the original client unmodified.
+ // Director must not access the provided Request
+ // after returning.
+ Director func(*http.Request)
+
+ // The transport used to perform proxy requests.
+ // If nil, http.DefaultTransport is used.
+ Transport http.RoundTripper
+
+ // FlushInterval specifies the flush interval
+ // to flush to the client while copying the
+ // response body.
+ // If zero, no periodic flushing is done.
+ // A negative value means to flush immediately
+ // after each write to the client.
+ // The FlushInterval is ignored when ReverseProxy
+ // recognizes a response as a streaming response;
+ // for such responses, writes are flushed to the client
+ // immediately.
+ FlushInterval time.Duration
+
+ // ErrorLog specifies an optional logger for errors
+ // that occur when attempting to proxy the request.
+ // If nil, logging goes to os.Stderr via the log package's
+ // standard logger.
+ ErrorLog *log.Logger
+
+ // BufferPool optionally specifies a buffer pool to
+ // get byte slices for use by io.CopyBuffer when
+ // copying HTTP response bodies.
+ BufferPool BufferPool
+
+ // ModifyResponse is an optional function that modifies the
+ // Response from the backend. It is called if the backend
+ // returns a response at all, with any HTTP status code.
+ // If the backend is unreachable, the optional ErrorHandler is
+ // called without any call to ModifyResponse.
+ //
+ // If ModifyResponse returns an error, ErrorHandler is called
+ // with its error value. If ErrorHandler is nil, its default
+ // implementation is used.
+ ModifyResponse func(*http.Response) error
+
+ // ErrorHandler is an optional function that handles errors
+ // reaching the backend or errors from ModifyResponse.
+ //
+ // If nil, the default is to log the provided error and return
+ // a 502 Status Bad Gateway response.
+ ErrorHandler func(http.ResponseWriter, *http.Request, error)
+}
+
+// A BufferPool is an interface for getting and returning temporary
+// byte slices for use by io.CopyBuffer.
+type BufferPool interface {
+ Get() []byte
+ Put([]byte)
+}
+
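+// singleJoiningSlash joins a and b with exactly one slash between them,
+// e.g. ("/base", "/dir") and ("/base/", "/dir") both yield "/base/dir".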
+func singleJoiningSlash(a, b string) string {
+ aslash := strings.HasSuffix(a, "/")
+ bslash := strings.HasPrefix(b, "/")
+ switch {
+ case aslash && bslash:
+ return a + b[1:]
+ case !aslash && !bslash:
+ return a + "/" + b
+ }
+ return a + b
+}
+
+// NewSingleHostReverseProxy returns a new ReverseProxy that routes
+// URLs to the scheme, host, and base path provided in target. If the
+// target's path is "/base" and the incoming request was for "/dir",
+// the target request will be for /base/dir.
+// NewSingleHostReverseProxy does not rewrite the Host header.
+// To rewrite Host headers, use ReverseProxy directly with a custom
+// Director policy.
+func NewSingleHostReverseProxy(target *url.URL) *ReverseProxy {
+ targetQuery := target.RawQuery
+ director := func(req *http.Request) {
+ req.URL.Scheme = target.Scheme
+ req.URL.Host = target.Host
+ req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
+ if targetQuery == "" || req.URL.RawQuery == "" {
+ req.URL.RawQuery = targetQuery + req.URL.RawQuery
+ } else {
+ req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
+ }
+ if _, ok := req.Header["User-Agent"]; !ok {
+ // explicitly disable User-Agent so it's not set to default value
+ req.Header.Set("User-Agent", "")
+ }
+ }
+ return &ReverseProxy{Director: director}
+}
+
+func copyHeader(dst, src http.Header) {
+ for k, vv := range src {
+ for _, v := range vv {
+ dst.Add(k, v)
+ }
+ }
+}
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// Hop-by-hop headers. These are removed when sent to the backend.
+// As of RFC 7230, hop-by-hop headers are required to appear in the
+// Connection header field. These are the headers defined by the
+// obsoleted RFC 2616 (section 13.5.1) and are used for backward
+// compatibility.
+var hopHeaders = []string{
+ "Connection",
+ "Proxy-Connection", // non-standard but still sent by libcurl and rejected by e.g. google
+ "Keep-Alive",
+ "Proxy-Authenticate",
+ "Proxy-Authorization",
+ "Te", // canonicalized version of "TE"
+ "Trailer", // not Trailers per URL above; https://www.rfc-editor.org/errata_search.php?eid=4522
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+func (p *ReverseProxy) defaultErrorHandler(rw http.ResponseWriter, req *http.Request, err error) {
+ p.logf("http: proxy error: %v", err)
+ rw.WriteHeader(http.StatusBadGateway)
+}
+
+func (p *ReverseProxy) getErrorHandler() func(http.ResponseWriter, *http.Request, error) {
+ if p.ErrorHandler != nil {
+ return p.ErrorHandler
+ }
+ return p.defaultErrorHandler
+}
+
+// modifyResponse conditionally runs the optional ModifyResponse hook
+// and reports whether the request should proceed.
+func (p *ReverseProxy) modifyResponse(rw http.ResponseWriter, res *http.Response, req *http.Request) bool {
+ if p.ModifyResponse == nil {
+ return true
+ }
+ if err := p.ModifyResponse(res); err != nil {
+ res.Body.Close()
+ p.getErrorHandler()(rw, req, err)
+ return false
+ }
+ return true
+}
+
+func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) (*http.Response, error) {
+ transport := p.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ ctx := req.Context()
+ if cn, ok := rw.(http.CloseNotifier); ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ notifyChan := cn.CloseNotify()
+ go func() {
+ select {
+ case <-notifyChan:
+ cancel()
+ case <-ctx.Done():
+ }
+ }()
+ }
+
+ outreq := req.WithContext(ctx) // includes shallow copies of maps, but okay
+ if req.ContentLength == 0 {
+ outreq.Body = nil // Issue 16036: nil Body for http.Transport retries
+ }
+
+ outreq.Header = cloneHeader(req.Header)
+
+ p.Director(outreq)
+ outreq.Close = false
+
+ reqUpType := upgradeType(outreq.Header)
+ removeConnectionHeaders(outreq.Header)
+
+ // Remove hop-by-hop headers to the backend. Especially
+ // important is "Connection" because we want a persistent
+ // connection, regardless of what the client sent to us.
+ for _, h := range hopHeaders {
+ hv := outreq.Header.Get(h)
+ if hv == "" {
+ continue
+ }
+ if h == "Te" && hv == "trailers" {
+ // Issue 21096: tell backend applications that
+ // care about trailer support that we support
+ // trailers. (We do, but we don't go out of
+ // our way to advertise that unless the
+ // incoming client request thought it was
+ // worth mentioning)
+ continue
+ }
+ outreq.Header.Del(h)
+ }
+
+ // After stripping all the hop-by-hop connection headers above, add back any
+ // necessary for protocol upgrades, such as for websockets.
+ if reqUpType != "" {
+ outreq.Header.Set("Connection", "Upgrade")
+ outreq.Header.Set("Upgrade", reqUpType)
+ }
+
+ if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
+ // If we aren't the first proxy retain prior
+ // X-Forwarded-For information as a comma+space
+ // separated list and fold multiple headers into one.
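+ // e.g. a prior "X-Forwarded-For: 1.2.3.4" from an earlier proxy becomes "1.2.3.4, <client ip>".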
+ if prior, ok := outreq.Header["X-Forwarded-For"]; ok {
+ clientIP = strings.Join(prior, ", ") + ", " + clientIP
+ }
+ outreq.Header.Set("X-Forwarded-For", clientIP)
+ }
+
+ res, err := transport.RoundTrip(outreq)
+ if err != nil {
+ p.getErrorHandler()(rw, outreq, err)
+ return nil, err
+ }
+
+ // Deal with 101 Switching Protocols responses (WebSocket, h2c, etc.)
+ if res.StatusCode == http.StatusSwitchingProtocols {
+ if !p.modifyResponse(rw, res, outreq) {
+ return res, nil
+ }
+
+ p.handleUpgradeResponse(rw, outreq, res)
+ return res, nil
+ }
+
+ removeConnectionHeaders(res.Header)
+
+ for _, h := range hopHeaders {
+ res.Header.Del(h)
+ }
+
+ if !p.modifyResponse(rw, res, outreq) {
+ return res, nil
+ }
+
+ copyHeader(rw.Header(), res.Header)
+
+ // The "Trailer" header isn't included in the Transport's response,
+ // at least for *http.Transport. Build it up from Trailer.
+ announcedTrailers := len(res.Trailer)
+ if announcedTrailers > 0 {
+ trailerKeys := make([]string, 0, len(res.Trailer))
+ for k := range res.Trailer {
+ trailerKeys = append(trailerKeys, k)
+ }
+ rw.Header().Add("Trailer", strings.Join(trailerKeys, ", "))
+ }
+
+ rw.WriteHeader(res.StatusCode)
+
+ err = p.copyResponse(rw, res.Body, p.flushInterval(req, res))
+ if err != nil {
+ defer res.Body.Close()
+ // Since we're streaming the response, if we run into an error all we can do
+ // is abort the request. Issue 23643: ReverseProxy should use ErrAbortHandler
+ // on read error while copying body.
+ if !shouldPanicOnCopyError(req) {
+ p.logf("suppressing panic for copyResponse error in test; copy error: %v", err)
+ return nil, err
+ }
+
+ panic(http.ErrAbortHandler)
+ }
+ res.Body.Close() // close now, instead of defer, to populate res.Trailer
+
+ if len(res.Trailer) > 0 {
+ // Force chunking if we saw a response trailer.
+ // This prevents net/http from calculating the length for short
+ // bodies and adding a Content-Length.
+ if fl, ok := rw.(http.Flusher); ok {
+ fl.Flush()
+ }
+ }
+
+ if len(res.Trailer) == announcedTrailers {
+ copyHeader(rw.Header(), res.Trailer)
+ return res, nil
+ }
+
+ for k, vv := range res.Trailer {
+ k = http.TrailerPrefix + k
+ for _, v := range vv {
+ rw.Header().Add(k, v)
+ }
+ }
+
+ return res, nil
+}
+
+var inOurTests bool // whether we're in our own tests
+
+// shouldPanicOnCopyError reports whether the reverse proxy should
+// panic with http.ErrAbortHandler. This is the right thing to do by
+// default, but Go 1.10 and earlier did not, so existing unit tests
+// weren't expecting panics. Only panic in our own tests, or when
+// running under the HTTP server.
+func shouldPanicOnCopyError(req *http.Request) bool {
+ if inOurTests {
+ // Our tests know to handle this panic.
+ return true
+ }
+ if req.Context().Value(http.ServerContextKey) != nil {
+ // We seem to be running under an HTTP server, so
+ // it'll recover the panic.
+ return true
+ }
+ // Otherwise act like Go 1.10 and earlier to not break
+ // existing tests.
+ return false
+}
+
+// removeConnectionHeaders removes hop-by-hop headers listed in the "Connection" header of h.
+// See RFC 7230, section 6.1
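+// For example, "Connection: Keep-Alive, X-Internal" removes the Keep-Alive and X-Internal header fields.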
+func removeConnectionHeaders(h http.Header) {
+ if c := h.Get("Connection"); c != "" {
+ for _, f := range strings.Split(c, ",") {
+ if f = strings.TrimSpace(f); f != "" {
+ h.Del(f)
+ }
+ }
+ }
+}
+
+// flushInterval returns the p.FlushInterval value, conditionally
+// overriding its value for a specific request/response.
+func (p *ReverseProxy) flushInterval(req *http.Request, res *http.Response) time.Duration {
+ resCT := res.Header.Get("Content-Type")
+
+ // For Server-Sent Events responses, flush immediately.
+ // The MIME type is defined in https://www.w3.org/TR/eventsource/#text-event-stream
+ if resCT == "text/event-stream" {
+ return -1 // negative means immediately
+ }
+
+ // TODO: more specific cases? e.g. res.ContentLength == -1?
+ return p.FlushInterval
+}
+
+func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader, flushInterval time.Duration) error {
+ if flushInterval != 0 {
+ if wf, ok := dst.(writeFlusher); ok {
+ mlw := &maxLatencyWriter{
+ dst: wf,
+ latency: flushInterval,
+ }
+ defer mlw.stop()
+ dst = mlw
+ }
+ }
+
+ var buf []byte
+ if p.BufferPool != nil {
+ buf = p.BufferPool.Get()
+ defer p.BufferPool.Put(buf)
+ }
+ _, err := p.copyBuffer(dst, src, buf)
+ return err
+}
+
+// copyBuffer returns the number of bytes written, along with any write error or
+// non-EOF read error encountered during the copy.
+func (p *ReverseProxy) copyBuffer(dst io.Writer, src io.Reader, buf []byte) (int64, error) {
+ if len(buf) == 0 {
+ buf = make([]byte, 32*1024)
+ }
+ var written int64
+ for {
+ nr, rerr := src.Read(buf)
+ if rerr != nil && rerr != io.EOF && rerr != context.Canceled {
+ p.logf("httputil: ReverseProxy read error during body copy: %v", rerr)
+ }
+ if nr > 0 {
+ nw, werr := dst.Write(buf[:nr])
+ if nw > 0 {
+ written += int64(nw)
+ }
+ if werr != nil {
+ return written, werr
+ }
+ if nr != nw {
+ return written, io.ErrShortWrite
+ }
+ }
+ if rerr != nil {
+ if rerr == io.EOF {
+ rerr = nil
+ }
+ return written, rerr
+ }
+ }
+}
+
+func (p *ReverseProxy) logf(format string, args ...interface{}) {
+ if p.ErrorLog != nil {
+ p.ErrorLog.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+type writeFlusher interface {
+ io.Writer
+ http.Flusher
+}
+
+type maxLatencyWriter struct {
+ dst writeFlusher
+ latency time.Duration // non-zero; negative means to flush immediately
+
+ mu sync.Mutex // protects t, flushPending, and dst.Flush
+ t *time.Timer
+ flushPending bool
+}
+
+func (m *maxLatencyWriter) Write(p []byte) (n int, err error) {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ n, err = m.dst.Write(p)
+ if m.latency < 0 {
+ m.dst.Flush()
+ return
+ }
+ if m.flushPending {
+ return
+ }
+ if m.t == nil {
+ m.t = time.AfterFunc(m.latency, m.delayedFlush)
+ } else {
+ m.t.Reset(m.latency)
+ }
+ m.flushPending = true
+ return
+}
+
+func (m *maxLatencyWriter) delayedFlush() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ if !m.flushPending { // if stop was called but AfterFunc already started this goroutine
+ return
+ }
+ m.dst.Flush()
+ m.flushPending = false
+}
+
+func (m *maxLatencyWriter) stop() {
+ m.mu.Lock()
+ defer m.mu.Unlock()
+ m.flushPending = false
+ if m.t != nil {
+ m.t.Stop()
+ }
+}
+
+func upgradeType(h http.Header) string {
+ if !httpguts.HeaderValuesContainsToken(h["Connection"], "Upgrade") {
+ return ""
+ }
+ return strings.ToLower(h.Get("Upgrade"))
+}
+
+func (p *ReverseProxy) handleUpgradeResponse(rw http.ResponseWriter, req *http.Request, res *http.Response) {
+ reqUpType := upgradeType(req.Header)
+ resUpType := upgradeType(res.Header)
+ if reqUpType != resUpType {
+ p.getErrorHandler()(rw, req, fmt.Errorf("backend tried to switch protocol %q when %q was requested", resUpType, reqUpType))
+ return
+ }
+
+ copyHeader(res.Header, rw.Header())
+
+ hj, ok := rw.(http.Hijacker)
+ if !ok {
+ p.getErrorHandler()(rw, req, fmt.Errorf("can't switch protocols using non-Hijacker ResponseWriter type %T", rw))
+ return
+ }
+ backConn, ok := res.Body.(io.ReadWriteCloser)
+ if !ok {
+ p.getErrorHandler()(rw, req, fmt.Errorf("internal error: 101 switching protocols response with non-writable body"))
+ return
+ }
+ defer backConn.Close()
+ conn, brw, err := hj.Hijack()
+ if err != nil {
+ p.getErrorHandler()(rw, req, fmt.Errorf("Hijack failed on protocol switch: %v", err))
+ return
+ }
+ defer conn.Close()
+ res.Body = nil // so res.Write only writes the headers; we have res.Body in backConn above
+ if err := res.Write(brw); err != nil {
+ p.getErrorHandler()(rw, req, fmt.Errorf("response write: %v", err))
+ return
+ }
+ if err := brw.Flush(); err != nil {
+ p.getErrorHandler()(rw, req, fmt.Errorf("response flush: %v", err))
+ return
+ }
+ errc := make(chan error, 1)
+ spc := switchProtocolCopier{user: conn, backend: backConn}
+ go spc.copyToBackend(errc)
+ go spc.copyFromBackend(errc)
+ <-errc
+ return
+}
+
+// switchProtocolCopier exists so goroutines proxying data back and
+// forth have nice names in stacks.
+type switchProtocolCopier struct {
+ user, backend io.ReadWriter
+}
+
+func (c switchProtocolCopier) copyFromBackend(errc chan<- error) {
+ _, err := io.Copy(c.user, c.backend)
+ errc <- err
+}
+
+func (c switchProtocolCopier) copyToBackend(errc chan<- error) {
+ _, err := io.Copy(c.backend, c.user)
+ errc <- err
+}
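
Unlike net/http/httputil, this fork's ServeHTTP returns the upstream *http.Response
along with any error, so callers can inspect the proxied result. A minimal sketch of
driving it directly from within the package (the target URL is a placeholder; assume
"log", "net/http", and "net/url" are imported):

    target, err := url.Parse("http://localhost:8080")
    if err != nil {
        log.Fatal(err)
    }
    rp := NewSingleHostReverseProxy(target)

    handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        res, err := rp.ServeHTTP(w, r)
        if err != nil {
            return // the proxy's error handler (default: log + 502) has already produced a response
        }
        log.Printf("proxied %s -> %d", r.URL.Path, res.StatusCode)
    })
    log.Fatal(http.ListenAndServe(":8081", handler))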
diff --git a/modules/caddyhttp/reverseproxy/upstream.go b/modules/caddyhttp/reverseproxy/upstream.go
new file mode 100755
index 0000000..b521d46
--- /dev/null
+++ b/modules/caddyhttp/reverseproxy/upstream.go
@@ -0,0 +1,413 @@
+// Package reverseproxy implements a load-balanced reverse proxy.
+package reverseproxy
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "bitbucket.org/lightcodelabs/caddy2"
+)
+
+// State represents the global state of a loadbalancer. It is used to store
+// references to health checkers.
+type State struct {
+ HealthCheckers []*HealthChecker
+}
+
+// CircuitBreaker defines the functionality of a circuit breaker module.
+type CircuitBreaker interface {
+ Ok() bool
+ RecordMetric(statusCode int, latency time.Duration)
+}
+
+type noopCircuitBreaker struct{}
+
+func (ncb noopCircuitBreaker) RecordMetric(statusCode int, latency time.Duration) {}
+func (ncb noopCircuitBreaker) Ok() bool {
+ return true
+}
+
+const (
+ // TypeBalanceRoundRobin configures a load balanced reverse proxy to use round robin load balancing.
+ TypeBalanceRoundRobin = iota
+
+ // TypeBalanceRandom configures a load balanced reverse proxy to use random load balancing.
+ TypeBalanceRandom
+
+ // msgNoHealthyUpstreams is returned when there are no healthy upstreams to proxy a request to
+ msgNoHealthyUpstreams = "No healthy upstreams."
+
+ // by default, perform health checks every 30 seconds
+ defaultHealthCheckDur = time.Second * 30
+
+ // used while an upstream is unhealthy; health checks run at this faster rate until it recovers
+ defaultFastHealthCheckDur = time.Second * 1
+)
+
+var (
+ // defaultTransport is the default transport to use for the reverse proxy.
+ defaultTransport = &http.Transport{
+ Dial: (&net.Dialer{
+ Timeout: 5 * time.Second,
+ }).Dial,
+ TLSHandshakeTimeout: 5 * time.Second,
+ }
+
+ // defaultHTTPClient is the default http client to use for the healthchecker.
+ defaultHTTPClient = &http.Client{
+ Timeout: time.Second * 10,
+ Transport: defaultTransport,
+ }
+
+ // typeMap maps the configured load balance type string to its internal representation.
+ typeMap = map[string]int{
+ "round_robin": TypeBalanceRoundRobin,
+ "random": TypeBalanceRandom,
+ }
+)
+
+// NewLoadBalancedReverseProxy configures the upstreams of a LoadBalanced reverse proxy and starts their health check workers.
+func NewLoadBalancedReverseProxy(lb *LoadBalanced, state *State) error {
+ // set defaults
+ if lb.NoHealthyUpstreamsMessage == "" {
+ lb.NoHealthyUpstreamsMessage = msgNoHealthyUpstreams
+ }
+
+ if lb.TryInterval == "" {
+ lb.TryInterval = "20s"
+ }
+
+ // set request retry interval
+ ti, err := time.ParseDuration(lb.TryInterval)
+ if err != nil {
+ return fmt.Errorf("NewLoadBalancedReverseProxy: %v", err.Error())
+ }
+ lb.tryInterval = ti
+
+ // set load balance algorithm
+ t, ok := typeMap[lb.LoadBalanceType]
+ if !ok {
+ t = TypeBalanceRandom
+ }
+ lb.loadBalanceType = t
+
+ // setup each upstream
+ var us []*upstream
+ for _, uc := range lb.Upstreams {
+ // pass the upstream decr and incr methods to keep track of unhealthy nodes
+ nu, err := newUpstream(uc, lb.decrUnhealthy, lb.incrUnhealthy)
+ if err != nil {
+ return err
+ }
+
+ // setup any configured circuit breakers
+ var cbModule = "http.responders.reverse_proxy.circuit_breaker"
+ var cb CircuitBreaker
+
+ if uc.CircuitBreaker != nil {
+ if _, err := caddy2.GetModule(cbModule); err == nil {
+ val, err := caddy2.LoadModule(cbModule, uc.CircuitBreaker)
+ if err == nil {
+ cbv, ok := val.(CircuitBreaker)
+ if ok {
+ cb = cbv
+ } else {
+ // err is nil here; the module loaded but does not implement CircuitBreaker
+ fmt.Printf("\nloaded module is not a CircuitBreaker; using noop")
+ cb = noopCircuitBreaker{}
+ }
+ } else {
+ fmt.Printf("\nerr: %v; cannot load circuit_breaker, using noop", err.Error())
+ cb = noopCircuitBreaker{}
+ }
+ } else {
+ fmt.Println("circuit_breaker module not loaded, using noop")
+ cb = noopCircuitBreaker{}
+ }
+ } else {
+ cb = noopCircuitBreaker{}
+ }
+ nu.CB = cb
+
+ // start a healthcheck worker which will periodically check to see if an upstream is healthy
+ // to proxy requests to.
+ nu.healthChecker = NewHealthCheckWorker(nu, defaultHealthCheckDur, defaultHTTPClient)
+
+ // TODO :- if path is empty why does this empty the entire Target?
+ // nu.Target.Path = uc.HealthCheckPath
+
+ go nu.healthChecker.ScheduleChecks(nu.Target.String())
+ state.HealthCheckers = append(state.HealthCheckers, nu.healthChecker)
+
+ // each upstream starts out assumed unhealthy until its first health check passes,
+ // so account for it in the unhealthy counter
+ lb.incrUnhealthy()
+
+ us = append(us, nu)
+ }
+
+ lb.upstreams = us
+
+ return nil
+}
+
+// LoadBalanced represents a collection of load balanced upstream hosts. It provides
+// health checking and circuit breaking functionality for its upstreams.
+type LoadBalanced struct {
+ mu sync.Mutex
+ numUnhealthy int32
+ selectedServer int // used during round robin load balancing
+ loadBalanceType int
+ tryInterval time.Duration
+ upstreams []*upstream
+
+ // The following struct fields are set by caddy configuration.
+ // TryInterval is the max duration for which request retrys will be performed for a request.
+ // TryInterval is the maximum duration for which retries will be performed for a request.
+
+ // Upstreams are the configs for upstream hosts
+ Upstreams []*UpstreamConfig `json:"upstreams"`
+
+ // LoadBalanceType is the string representation of what loadbalancing algorithm to use. i.e. "random" or "round_robin".
+ LoadBalanceType string `json:"load_balance_type"`
+
+ // NoHealthyUpstreamsMessage is returned as a response when there are no healthy upstreams to loadbalance to.
+ NoHealthyUpstreamsMessage string `json:"no_healthy_upstreams_message"`
+}
+
+// ServeHTTP dispatches an http request to the proper upstream server. Unlike the
+// standard http.Handler interface, it returns an error so callers can react to proxy failures.
+func (lb *LoadBalanced) ServeHTTP(w http.ResponseWriter, r *http.Request) error {
+ // ensure requests don't hang if an upstream does not respond or is not eventually
+ // healthy; stop retrying once the configured try interval has elapsed
+ var u *upstream
+ deadline := time.Now().Add(lb.tryInterval)
+
+ // keep trying to get an available upstream to process the request
+ for {
+ switch lb.loadBalanceType {
+ case TypeBalanceRandom:
+ u = lb.random()
+ case TypeBalanceRoundRobin:
+ u = lb.roundRobin()
+ }
+
+ // if we can't get an upstream and the retry interval has elapsed, return an error response
+ if u == nil && time.Now().After(deadline) {
+ w.WriteHeader(http.StatusBadGateway)
+ fmt.Fprint(w, lb.NoHealthyUpstreamsMessage)
+
+ return fmt.Errorf(msgNoHealthyUpstreams)
+ }
+
+ // no upstream is available yet; back off briefly before trying again
+ if u == nil {
+ time.Sleep(10 * time.Millisecond)
+ continue
+ }
+
+ start := time.Now()
+
+ // if we get an error, retry until we get a healthy upstream
+ res, err := u.ReverseProxy.ServeHTTP(w, r)
+ if err != nil {
+ if err == context.Canceled {
+ return nil
+ }
+
+ continue
+ }
+
+ // record circuit breaker metrics
+ go u.CB.RecordMetric(res.StatusCode, time.Since(start))
+
+ return nil
+ }
+}
+
+// incrUnhealthy increments the number of unhealthy nodes in a loadbalancer.
+func (lb *LoadBalanced) incrUnhealthy() {
+ atomic.AddInt32(&lb.numUnhealthy, 1)
+}
+
+// decrUnhealthy decrements the number of unhealthy nodes in a loadbalancer.
+func (lb *LoadBalanced) decrUnhealthy() {
+ atomic.AddInt32(&lb.numUnhealthy, -1)
+}
+
+// roundRobin implements a round robin load balancing algorithm to select
+// which server to forward requests to.
+func (lb *LoadBalanced) roundRobin() *upstream {
+ if atomic.LoadInt32(&lb.numUnhealthy) == int32(len(lb.upstreams)) {
+ return nil
+ }
+
+ lb.mu.Lock()
+ selected := lb.upstreams[lb.selectedServer]
+ lb.selectedServer++
+ if lb.selectedServer >= len(lb.upstreams) {
+ lb.selectedServer = 0
+ }
+ lb.mu.Unlock()
+
+ if selected.IsHealthy() && selected.CB.Ok() {
+ return selected
+ }
+
+ return nil
+}
+
+// random implements a random server selector for load balancing.
+func (lb *LoadBalanced) random() *upstream {
+ if atomic.LoadInt32(&lb.numUnhealthy) == int32(len(lb.upstreams)) {
+ return nil
+ }
+
+ n := rand.Intn(len(lb.upstreams))
+ selected := lb.upstreams[n]
+
+ if selected.IsHealthy() && selected.CB.Ok() {
+ return selected
+ }
+
+ return nil
+}
+
+// UpstreamConfig represents the config of an upstream.
+type UpstreamConfig struct {
+ // Host is the host name of the upstream server.
+ Host string `json:"host"`
+
+ // FastHealthCheckDuration is the interval at which health checks are performed while a node is considered unhealthy.
+ FastHealthCheckDuration string `json:"fast_health_check_duration"`
+
+ CircuitBreaker json.RawMessage `json:"circuit_breaker"`
+
+ // // CircuitBreakerConfig is the config passed to setup a circuit breaker.
+ // CircuitBreakerConfig *circuitbreaker.Config `json:"circuit_breaker"`
+ circuitbreaker CircuitBreaker
+
+ // HealthCheckDuration is the default interval at which health checks are performed.
+ HealthCheckDuration string `json:"health_check_duration"`
+
+ // HealthCheckPath is the path at the upstream host to use for healthchecks.
+ HealthCheckPath string `json:"health_check_path"`
+}
+
+// upstream represents an upstream host.
+type upstream struct {
+ Healthy int32 // 0 = false, 1 = true
+ Target *url.URL
+ ReverseProxy *ReverseProxy
+ Incr func()
+ Decr func()
+ CB CircuitBreaker
+ healthChecker *HealthChecker
+ healthCheckDur time.Duration
+ fastHealthCheckDur time.Duration
+}
+
+// newUpstream returns a new upstream.
+func newUpstream(uc *UpstreamConfig, d func(), i func()) (*upstream, error) {
+ host := strings.TrimSpace(uc.Host)
+ protoIdx := strings.Index(host, "://")
+ if protoIdx == -1 || len(host[:protoIdx]) == 0 {
+ return nil, fmt.Errorf("protocol is required for host")
+ }
+
+ hostURL, err := url.Parse(host)
+ if err != nil {
+ return nil, err
+ }
+
+ // parse healthcheck durations
+ hcd, err := time.ParseDuration(uc.HealthCheckDuration)
+ if err != nil {
+ hcd = defaultHealthCheckDur
+ }
+
+ fhcd, err := time.ParseDuration(uc.FastHealthCheckDuration)
+ if err != nil {
+ fhcd = defaultFastHealthCheckDur
+ }
+
+ u := upstream{
+ healthCheckDur: hcd,
+ fastHealthCheckDur: fhcd,
+ Target: hostURL,
+ Decr: d,
+ Incr: i,
+ Healthy: int32(0), // assume unhealthy on start
+ }
+
+ u.ReverseProxy = newReverseProxy(hostURL, u.SetHealthiness)
+ return &u, nil
+}
+
+// SetHealthiness sets whether an upstream is healthy or not. The health check worker is updated to
+// perform checks faster while a node is unhealthy.
+func (u *upstream) SetHealthiness(ok bool) {
+ if ok {
+ // transition from unhealthy to healthy; resume checking at the normal rate
+ if atomic.CompareAndSwapInt32(&u.Healthy, 0, 1) {
+ u.healthChecker.Ticker.Stop()
+ u.healthChecker.Ticker = time.NewTicker(u.healthCheckDur)
+ u.Decr()
+ }
+ } else {
+ // transition from healthy to unhealthy; check at the faster rate until it recovers
+ if atomic.CompareAndSwapInt32(&u.Healthy, 1, 0) {
+ u.healthChecker.Ticker.Stop()
+ u.healthChecker.Ticker = time.NewTicker(u.fastHealthCheckDur)
+ u.Incr()
+ }
+ }
+}
+
+// IsHealthy returns whether an Upstream is healthy or not.
+func (u *upstream) IsHealthy() bool {
+ return atomic.LoadInt32(&u.Healthy) == 1
+}
+
+// newReverseProxy returns a new reverse proxy handler.
+func newReverseProxy(target *url.URL, setHealthiness func(bool)) *ReverseProxy {
+ errorHandler := func(w http.ResponseWriter, r *http.Request, err error) {
+ // we don't need to worry about canceled contexts, since a canceled request doesn't
+ // necessarily mean that the upstream is unhealthy.
+ if err != context.Canceled {
+ setHealthiness(false)
+ }
+ }
+
+ rp := NewSingleHostReverseProxy(target)
+ rp.ErrorHandler = errorHandler
+ rp.Transport = defaultTransport // use the default transport, which has 5 second dial and TLS handshake timeouts
+ return rp
+}
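
The CircuitBreaker interface above only requires Ok and RecordMetric, so a module
satisfying it can be quite small. A hypothetical implementation for illustration only
(a real one would also register itself with caddy2 under
"http.responders.reverse_proxy.circuit_breaker"; assume "sync" and "time" are imported):

    // consecutiveFailureBreaker trips after a fixed number of consecutive
    // 5xx responses and resets on the next success.
    type consecutiveFailureBreaker struct {
        mu        sync.Mutex
        failures  int
        threshold int
    }

    func (c *consecutiveFailureBreaker) Ok() bool {
        c.mu.Lock()
        defer c.mu.Unlock()
        return c.failures < c.threshold
    }

    func (c *consecutiveFailureBreaker) RecordMetric(statusCode int, latency time.Duration) {
        c.mu.Lock()
        defer c.mu.Unlock()
        if statusCode >= 500 {
            c.failures++
        } else {
            c.failures = 0
        }
    }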