summaryrefslogtreecommitdiff
path: root/modules/caddyhttp/reverseproxy/reverseproxy.go
diff options
context:
space:
mode:
authorFrancis Lavoie <lavofr@gmail.com>2022-03-09 13:00:51 -0500
committerGitHub <noreply@github.com>2022-03-09 11:00:51 -0700
commitc7d6c4cbb951f7db87fc5aebf8382aeeca6c9f1d (patch)
tree7d210cd4f76ed91724da905250d90e2091078aed /modules/caddyhttp/reverseproxy/reverseproxy.go
parentd0b608af3178bc674936f4b1c6cce00591ebbf09 (diff)
reverseproxy: copy_response and copy_response_headers for handle_response routes (#4391)
* reverseproxy: New `copy_response` handler for `handle_response` routes Followup to #4298 and #4388. This adds a new `copy_response` handler which may only be used in `reverse_proxy`'s `handle_response` routes, which can be used to actually copy the proxy response downstream. Previously, if `handle_response` was used (with routes, not the status code mode), it was impossible to use the upstream's response body at all, because we would always close the body, expecting the routes to write a new body from scratch. To implement this, I had to refactor `h.reverseProxy()` to move all the code that came after the `HandleResponse` loop into a new function. This new function `h.finalizeResponse()` takes care of preparing the response by removing extra headers, dealing with trailers, then copying the headers and body downstream. Since basically what we want `copy_response` to do is invoke `h.finalizeResponse()` at a configurable point in time, we need to pass down the proxy handler, the response, and some other state via a new `req.WithContext(ctx)`. Wrapping a new context is pretty much the only way we have to jump a few layers in the HTTP middleware chain and let a handler pick up this information. Feels a bit dirty, but it works. Also fixed a bug with the `http.reverse_proxy.upstream.duration` placeholder, it always had the same duration as `http.reverse_proxy.upstream.latency`, but the former was meant to be the time taken for the roundtrip _plus_ copying/writing the response. 
* Delete the "Content-Length" header if we aren't copying Fixes a bug where the Content-Length will mismatch the actual bytes written if we skipped copying the response, so we get a message like this when using curl: ``` curl: (18) transfer closed with 18 bytes remaining to read ``` To replicate: ``` { admin off debug } :8881 { reverse_proxy 127.0.0.1:8882 { @200 status 200 handle_response @200 { header Foo bar } } } :8882 { header Content-Type application/json respond `{"hello": "world"}` 200 } ``` * Implement `copy_response_headers`, with include/exclude list support * Apply suggestions from code review Co-authored-by: Matt Holt <mholt@users.noreply.github.com>
Diffstat (limited to 'modules/caddyhttp/reverseproxy/reverseproxy.go')
-rw-r--r--modules/caddyhttp/reverseproxy/reverseproxy.go87
1 file changed, 82 insertions, 5 deletions
diff --git a/modules/caddyhttp/reverseproxy/reverseproxy.go b/modules/caddyhttp/reverseproxy/reverseproxy.go
index 3355f0b..2131a91 100644
--- a/modules/caddyhttp/reverseproxy/reverseproxy.go
+++ b/modules/caddyhttp/reverseproxy/reverseproxy.go
@@ -790,12 +790,33 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, repl *
h.logger.Debug("handling response", zap.Int("handler", i))
+ // we make some data available via request context to child routes
+ // so that they may inherit some options and functions from the
+ // handler, and be able to copy the response.
+ hrc := &handleResponseContext{
+ handler: h,
+ response: res,
+ start: start,
+ logger: logger,
+ }
+ ctx := req.Context()
+ ctx = context.WithValue(ctx, proxyHandleResponseContextCtxKey, hrc)
+
// pass the request through the response handler routes
- routeErr := rh.Routes.Compile(next).ServeHTTP(rw, req)
+ routeErr := rh.Routes.Compile(next).ServeHTTP(rw, req.WithContext(ctx))
+
+ // if the response handler routes already finalized the response,
+ // we can return early. It should be finalized if the routes executed
+ // included a copy_response handler. If a fresh response was written
+ // by the routes instead, then we still need to finalize the response
+ // without copying the body.
+ if routeErr == nil && hrc.isFinalized {
+ return nil
+ }
- // always close the response body afterwards since it's expected
+ // always close the response body afterwards, since it's expected
// that the response handler routes will have written to the
- // response writer with a new body
+ // response writer with a new body, if it wasn't already finalized.
res.Body.Close()
bodyClosed = true
@@ -804,8 +825,25 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, repl *
// the roundtrip was successful and to not retry
return roundtripSucceeded{routeErr}
}
+
+ // we've already closed the body, so there's no use allowing
+ // another response handler to run as well
+ break
}
+ return h.finalizeResponse(rw, req, res, repl, start, logger, bodyClosed)
+}
+
+// finalizeResponse prepares and copies the response.
+func (h Handler) finalizeResponse(
+ rw http.ResponseWriter,
+ req *http.Request,
+ res *http.Response,
+ repl *caddy.Replacer,
+ start time.Time,
+ logger *zap.Logger,
+ bodyClosed bool,
+) error {
// deal with 101 Switching Protocols responses: (WebSocket, h2c, etc)
if res.StatusCode == http.StatusSwitchingProtocols {
h.handleUpgradeResponse(logger, rw, req, res)
@@ -818,6 +856,13 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, repl *
res.Header.Del(h)
}
+ // remove the content length if we're not going to be copying
+ // from the response, because otherwise there'll be a mismatch
+ // between bytes written and the advertised length
+ if bodyClosed {
+ res.Header.Del("Content-Length")
+ }
+
// apply any response header operations
if h.Headers != nil && h.Headers.Response != nil {
if h.Headers.Response.Require == nil ||
@@ -841,7 +886,7 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, repl *
rw.WriteHeader(res.StatusCode)
if !bodyClosed {
- err = h.copyResponse(rw, res.Body, h.flushInterval(req, res))
+ err := h.copyResponse(rw, res.Body, h.flushInterval(req, res))
res.Body.Close() // close now, instead of defer, to populate res.Trailer
if err != nil {
// we're streaming the response and we've already written headers, so
@@ -863,7 +908,7 @@ func (h *Handler) reverseProxy(rw http.ResponseWriter, req *http.Request, repl *
}
// total duration spent proxying, including writing response body
- repl.Set("http.reverse_proxy.upstream.duration", duration)
+ repl.Set("http.reverse_proxy.upstream.duration", time.Since(start))
if len(res.Trailer) == announcedTrailers {
copyHeader(rw.Header(), res.Trailer)
@@ -1227,6 +1272,38 @@ var bufPool = sync.Pool{
},
}
+// handleResponseContext carries some contextual information about the
+// the current proxy handling.
+type handleResponseContext struct {
+ // handler is the active proxy handler instance, so that
+ // routes like copy_response may inherit some config
+ // options and have access to handler methods.
+ handler *Handler
+
+ // response is the actual response received from the proxy
+ // roundtrip, to potentially be copied if a copy_response
+ // handler is in the handle_response routes.
+ response *http.Response
+
+ // start is the time just before the proxy roundtrip was
+ // performed, used for logging.
+ start time.Time
+
+ // logger is the prepared logger which is used to write logs
+ // with the request, duration, and selected upstream attached.
+ logger *zap.Logger
+
+ // isFinalized is whether the response has been finalized,
+ // i.e. copied and closed, to make sure that it doesn't
+ // happen twice.
+ isFinalized bool
+}
+
+// proxyHandleResponseContextCtxKey is the context key for the active proxy handler
+// so that handle_response routes can inherit some config options
+// from the proxy handler.
+const proxyHandleResponseContextCtxKey caddy.CtxKey = "reverse_proxy_handle_response_context"
+
// Interface guards
var (
_ caddy.Provisioner = (*Handler)(nil)