author    Matthew Holt <mholt@users.noreply.github.com>    2019-11-15 17:15:33 -0700
committer Matthew Holt <mholt@users.noreply.github.com>    2019-11-15 17:15:33 -0700
commit    1228dd7d93b59409fd844400b9c114be267df3a3 (patch)
tree      fec39d2abcaba8c2a4cdcdcbb17e39a0c8e905da
parent    af26a03da1af7cd7d64fc03270961829d0311ed7 (diff)
reverse_proxy: Allow buffering of client requests
This is a bad idea, but some backends apparently require it. See discussion in #176.
-rw-r--r--    modules/caddyhttp/reverseproxy/reverseproxy.go    51
-rw-r--r--    modules/caddyhttp/reverseproxy/streaming.go        1
2 files changed, 39 insertions(+), 13 deletions(-)
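For reference, a minimal sketch of how the new option surfaces in the handler's JSON config: the buffer_requests field maps onto the BufferRequests struct field added below. The config fragment and the upstream address are illustrative values, not taken from this commit.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
)

func main() {
	// Hypothetical handler config fragment; buffer_requests is the field
	// added by this commit, the upstream below is only an example value.
	raw := []byte(`{
		"buffer_requests": true,
		"upstreams": [{"dial": "localhost:8080"}]
	}`)

	var h reverseproxy.Handler
	if err := json.Unmarshal(raw, &h); err != nil {
		panic(err)
	}
	fmt.Println("buffering enabled:", h.BufferRequests) // true
}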
diff --git a/modules/caddyhttp/reverseproxy/reverseproxy.go b/modules/caddyhttp/reverseproxy/reverseproxy.go
index 77dc005..98118c2 100644
--- a/modules/caddyhttp/reverseproxy/reverseproxy.go
+++ b/modules/caddyhttp/reverseproxy/reverseproxy.go
@@ -15,14 +15,18 @@
package reverseproxy
import (
+ "bytes"
"context"
"encoding/json"
"fmt"
+ "io"
+ "io/ioutil"
"net"
"net/http"
"regexp"
"strconv"
"strings"
+ "sync"
"time"
"github.com/caddyserver/caddy/v2"
@@ -38,13 +42,14 @@ func init() {
// Handler implements a highly configurable and production-ready reverse proxy.
type Handler struct {
- TransportRaw json.RawMessage `json:"transport,omitempty"`
- CBRaw json.RawMessage `json:"circuit_breaker,omitempty"`
- LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
- HealthChecks *HealthChecks `json:"health_checks,omitempty"`
- Upstreams UpstreamPool `json:"upstreams,omitempty"`
- FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
- Headers *headers.Handler `json:"headers,omitempty"`
+ TransportRaw json.RawMessage `json:"transport,omitempty"`
+ CBRaw json.RawMessage `json:"circuit_breaker,omitempty"`
+ LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
+ HealthChecks *HealthChecks `json:"health_checks,omitempty"`
+ Upstreams UpstreamPool `json:"upstreams,omitempty"`
+ FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
+ Headers *headers.Handler `json:"headers,omitempty"`
+ BufferRequests bool `json:"buffer_requests,omitempty"`
Transport http.RoundTripper `json:"-"`
CB CircuitBreaker `json:"-"`
@@ -222,6 +227,27 @@ func (h *Handler) Cleanup() error {
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
repl := r.Context().Value(caddy.ReplacerCtxKey).(caddy.Replacer)
+ // if enabled, buffer client request;
+ // this should only be enabled if the
+ // upstream requires it and does not
+ // work with "slow clients" (gunicorn,
+ // etc.) - this obviously has a perf
+ overhead, puts the proxy at risk
+ of exhausting memory, and makes it
+ more susceptible to slowloris attacks,
+ so it is strongly recommended to
+ // only use this feature if absolutely
+ // required, if read timeouts are set,
+ // and if body size is limited
+ if h.BufferRequests {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ io.Copy(buf, r.Body)
+ r.Body.Close()
+ r.Body = ioutil.NopCloser(buf)
+ }
+
// prepare the request for proxying; this is needed only once
err := h.prepareRequest(r)
if err != nil {
@@ -662,12 +688,11 @@ type DialError struct {
error
}
-// TODO: see if we can use this
-// var bufPool = sync.Pool{
-// New: func() interface{} {
-// return new(bytes.Buffer)
-// },
-// }
+var bufPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
// Interface guards
var (
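The comment above recommends only buffering when read timeouts are set and body size is limited. As a rough illustration of those two mitigations in plain net/http terms (not Caddy's own server configuration; the limits below are arbitrary example values):

package main

import (
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Cap how much body a client may send before it gets buffered;
		// the 10 MB limit here is an arbitrary example value.
		r.Body = http.MaxBytesReader(w, r.Body, 10<<20)
		// ... buffer and proxy the request here ...
	})

	srv := &http.Server{
		Addr:              ":8080",
		Handler:           mux,
		ReadHeaderTimeout: 10 * time.Second, // bound slow header delivery
		ReadTimeout:       30 * time.Second, // bound slow body uploads (slowloris-style clients)
	}
	_ = srv.ListenAndServe()
}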
diff --git a/modules/caddyhttp/reverseproxy/streaming.go b/modules/caddyhttp/reverseproxy/streaming.go
index a3b711b..3ab122c 100644
--- a/modules/caddyhttp/reverseproxy/streaming.go
+++ b/modules/caddyhttp/reverseproxy/streaming.go
@@ -110,6 +110,7 @@ func (h Handler) copyResponse(dst io.Writer, src io.Reader, flushInterval time.D
// buf = h.BufferPool.Get()
// defer h.BufferPool.Put(buf)
// }
+ // // we could also see about a pool that returns values like this: make([]byte, 32*1024)
_, err := h.copyBuffer(dst, src, buf)
return err
}
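The new comment in copyResponse floats the idea of a pool that returns values like make([]byte, 32*1024). A standalone sketch of that idea (not part of this commit; names are illustrative):

package main

import (
	"io"
	"io/ioutil"
	"strings"
	"sync"
)

// copyBufPool hands out fixed-size scratch buffers for copy loops,
// the shape of pool the comment above alludes to. (Illustrative only.)
var copyBufPool = sync.Pool{
	New: func() interface{} {
		b := make([]byte, 32*1024) // 32 KiB, the same size io.Copy uses internally
		return &b                  // pool a pointer so Get/Put avoid extra allocations
	},
}

func pooledCopy(dst io.Writer, src io.Reader) (int64, error) {
	bufp := copyBufPool.Get().(*[]byte)
	defer copyBufPool.Put(bufp)
	return io.CopyBuffer(dst, src, *bufp)
}

func main() {
	_, _ = pooledCopy(ioutil.Discard, strings.NewReader("example"))
}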