Diffstat (limited to 'modules/caddyhttp/reverseproxy')
-rw-r--r--  modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go    2
-rw-r--r--  modules/caddyhttp/reverseproxy/healthchecks.go      72
-rw-r--r--  modules/caddyhttp/reverseproxy/hosts.go             18
-rw-r--r--  modules/caddyhttp/reverseproxy/ntlm.go               4
-rw-r--r--  modules/caddyhttp/reverseproxy/reverseproxy.go      87
5 files changed, 152 insertions(+), 31 deletions(-)
diff --git a/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go b/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go
index aff9a6e..f3f979d 100644
--- a/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go
+++ b/modules/caddyhttp/reverseproxy/fastcgi/fastcgi.go
@@ -57,7 +57,7 @@ type Transport struct {
// that 404's if the fastcgi path info is not found.
SplitPath string `json:"split_path,omitempty"`
- // Extra environment variables
+ // Extra environment variables.
EnvVars map[string]string `json:"env,omitempty"`
// The duration used to set a deadline when connecting to an upstream.
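For quick reference, a hedged sketch of setting the fields documented above from Go; the split path and environment values are illustrative, not defaults from this diff:

    package main

    import "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy/fastcgi"

    func main() {
        // Sketch only: illustrative values, not defaults.
        _ = fastcgi.Transport{
            SplitPath: ".php",                                  // substring on which to split PATH_INFO
            EnvVars:   map[string]string{"APP_ENV": "staging"}, // extra env vars (hypothetical)
        }
    }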
diff --git a/modules/caddyhttp/reverseproxy/healthchecks.go b/modules/caddyhttp/reverseproxy/healthchecks.go
index a64d845..76ee945 100644
--- a/modules/caddyhttp/reverseproxy/healthchecks.go
+++ b/modules/caddyhttp/reverseproxy/healthchecks.go
@@ -31,9 +31,16 @@ import (
"go.uber.org/zap"
)
-// HealthChecks holds configuration related to health checking.
+// HealthChecks configures active and passive health checks.
type HealthChecks struct {
- Active *ActiveHealthChecks `json:"active,omitempty"`
+ // Active health checks run in the background on a timer. To
+ // minimally enable active health checks, set either path or
+ // port (or both).
+ Active *ActiveHealthChecks `json:"active,omitempty"`
+
+ // Passive health checks monitor proxied requests for errors or timeouts.
+ // To minimally enable passive health checks, specify at least an empty
+ // config object.
Passive *PassiveHealthChecks `json:"passive,omitempty"`
}
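To make the "minimally enable" wording above concrete, a sketch (the /healthz path is hypothetical, not a default):

    package main

    import "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"

    func main() {
        // Sketch only; illustrative values.
        _ = reverseproxy.HealthChecks{
            // Active checks need at least a path or port to be enabled.
            Active: &reverseproxy.ActiveHealthChecks{Path: "/healthz"},
            // Passive checks need at least an empty config object; a
            // fail_duration > 0 is what activates failure tracking.
            Passive: &reverseproxy.PassiveHealthChecks{},
        }
    }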
@@ -41,14 +48,33 @@ type HealthChecks struct {
// health checks (that is, health checks which occur in a
// background goroutine independently).
type ActiveHealthChecks struct {
- Path string `json:"path,omitempty"`
- Port int `json:"port,omitempty"`
- Headers http.Header `json:"headers,omitempty"`
- Interval caddy.Duration `json:"interval,omitempty"`
- Timeout caddy.Duration `json:"timeout,omitempty"`
- MaxSize int64 `json:"max_size,omitempty"`
- ExpectStatus int `json:"expect_status,omitempty"`
- ExpectBody string `json:"expect_body,omitempty"`
+ // The URI path to use for health checks.
+ Path string `json:"path,omitempty"`
+
+ // The port to use (if different from the upstream's dial
+ // address) for health checks.
+ Port int `json:"port,omitempty"`
+
+ // HTTP headers to set on health check requests.
+ Headers http.Header `json:"headers,omitempty"`
+
+ // How frequently to perform active health checks (default 30s).
+ Interval caddy.Duration `json:"interval,omitempty"`
+
+ // How long to wait for a response from a backend before
+ // considering it unhealthy (default 5s).
+ Timeout caddy.Duration `json:"timeout,omitempty"`
+
+ // The maximum response body to download from the backend
+ // during a health check.
+ MaxSize int64 `json:"max_size,omitempty"`
+
+ // The HTTP status code to expect from a healthy backend.
+ ExpectStatus int `json:"expect_status,omitempty"`
+
+ // A regular expression against which to match the response
+ // body of a healthy backend.
+ ExpectBody string `json:"expect_body,omitempty"`
stopChan chan struct{}
httpClient *http.Client
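Putting the active health check fields together, a sketch with illustrative (non-default) values; per the comments above, the documented defaults are a 30s interval and a 5s timeout:

    package main

    import (
        "time"

        "github.com/caddyserver/caddy/v2"
        "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
    )

    func main() {
        // Sketch only; every value here is illustrative.
        _ = reverseproxy.ActiveHealthChecks{
            Path:         "/healthz",                       // hypothetical health endpoint
            Port:         8081,                             // only if it differs from the dial address
            Interval:     caddy.Duration(10 * time.Second), // default is 30s
            Timeout:      caddy.Duration(2 * time.Second),  // default is 5s
            ExpectStatus: 200,                              // healthy status code
            ExpectBody:   "OK",                             // matched as a regular expression
        }
    }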
@@ -60,11 +86,27 @@ type ActiveHealthChecks struct {
// health checks (that is, health checks which occur during
// the normal flow of request proxying).
type PassiveHealthChecks struct {
- MaxFails int `json:"max_fails,omitempty"`
- FailDuration caddy.Duration `json:"fail_duration,omitempty"`
- UnhealthyRequestCount int `json:"unhealthy_request_count,omitempty"`
- UnhealthyStatus []int `json:"unhealthy_status,omitempty"`
- UnhealthyLatency caddy.Duration `json:"unhealthy_latency,omitempty"`
+ // How long to remember a failed request to a backend. A duration > 0
+ // enables passive health checking. Default is 0.
+ FailDuration caddy.Duration `json:"fail_duration,omitempty"`
+
+ // The number of failed requests within the FailDuration window to
+ // consider a backend as "down". Must be >= 1; default is 1. Requires
+ // that FailDuration be > 0.
+ MaxFails int `json:"max_fails,omitempty"`
+
+ // Limits the number of simultaneous requests to a backend by
+ // marking the backend as "down" if it has this many concurrent
+ // requests or more.
+ UnhealthyRequestCount int `json:"unhealthy_request_count,omitempty"`
+
+ // Count the request as failed if the response comes back with
+ // one of these status codes.
+ UnhealthyStatus []int `json:"unhealthy_status,omitempty"`
+
+ // Count the request as failed if the response takes at least this
+ // long to receive.
+ UnhealthyLatency caddy.Duration `json:"unhealthy_latency,omitempty"`
logger *zap.Logger
}
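A corresponding sketch for the passive health check fields; the values are illustrative, not defaults:

    package main

    import (
        "time"

        "github.com/caddyserver/caddy/v2"
        "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
    )

    func main() {
        // Sketch only; illustrative values.
        _ = reverseproxy.PassiveHealthChecks{
            FailDuration:          caddy.Duration(30 * time.Second), // > 0 enables passive checking
            MaxFails:              3,                                // failures within FailDuration to mark "down"
            UnhealthyRequestCount: 100,                              // concurrent-request ceiling
            UnhealthyStatus:       []int{500, 502, 503},             // responses counted as failures
            UnhealthyLatency:      caddy.Duration(5 * time.Second),  // slow responses counted as failures
        }
    }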
diff --git a/modules/caddyhttp/reverseproxy/hosts.go b/modules/caddyhttp/reverseproxy/hosts.go
index 8bad7c2..e7c61fb 100644
--- a/modules/caddyhttp/reverseproxy/hosts.go
+++ b/modules/caddyhttp/reverseproxy/hosts.go
@@ -61,8 +61,22 @@ type UpstreamPool []*Upstream
type Upstream struct {
Host `json:"-"`
- Dial string `json:"dial,omitempty"`
- MaxRequests int `json:"max_requests,omitempty"`
+ // The [network address](/docs/json/apps/http/#servers/listen)
+ // to dial to connect to the upstream. Must represent precisely
+ // one socket (i.e. no port ranges). A valid network address
+ // either has a host and port, or is a unix socket address.
+ //
+ // Placeholders may be used to make the upstream dynamic, but be
+ // aware of the health check implications of this: a single
+ // upstream that represents numerous (perhaps arbitrary) backends
+ // can be considered down if just one (or enough) of those
+ // backends is down. Also be aware of open proxy vulnerabilities.
+ Dial string `json:"dial,omitempty"`
+
+ // The maximum number of simultaneous requests to allow to
+ // this upstream. If set, overrides the global passive health
+ // check UnhealthyRequestCount value.
+ MaxRequests int `json:"max_requests,omitempty"`
// TODO: This could be really useful, to bind requests
// with certain properties to specific backends
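A sketch of upstream dial addresses in the documented forms; the addresses and limits are hypothetical, and the unix socket syntax is assumed from Caddy's network address format:

    package main

    import "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"

    func main() {
        // Sketch only; hypothetical backends.
        _ = reverseproxy.UpstreamPool{
            {Dial: "10.0.0.1:8080"},                             // host:port form
            {Dial: "unix//run/app.sock"},                        // unix socket form (assumed syntax)
            {Dial: "{http.request.host}:8080", MaxRequests: 50}, // placeholder form; mind open proxy risks
        }
    }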
diff --git a/modules/caddyhttp/reverseproxy/ntlm.go b/modules/caddyhttp/reverseproxy/ntlm.go
index ea2bb85..be4330f 100644
--- a/modules/caddyhttp/reverseproxy/ntlm.go
+++ b/modules/caddyhttp/reverseproxy/ntlm.go
@@ -46,6 +46,10 @@ func init() {
//
// This transport also forces HTTP/1.1 and Keep-Alives in order
// for NTLM to succeed.
+//
+// It is basically the same thing as
+// [nginx's paid ntlm directive](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#ntlm)
+// (but is free in Caddy!).
type NTLMTransport struct {
*HTTPTransport
diff --git a/modules/caddyhttp/reverseproxy/reverseproxy.go b/modules/caddyhttp/reverseproxy/reverseproxy.go
index d353dc1..24389b2 100644
--- a/modules/caddyhttp/reverseproxy/reverseproxy.go
+++ b/modules/caddyhttp/reverseproxy/reverseproxy.go
@@ -41,15 +41,56 @@ func init() {
}
// Handler implements a highly configurable and production-ready reverse proxy.
+// Upon proxying, this module sets the following placeholders (which can be used
+// both within and after this handler):
+//
+// {http.reverse_proxy.upstream.address}
+// The full address to the upstream as given in the config
+// {http.reverse_proxy.upstream.hostport}
+// The host:port of the upstream
+// {http.reverse_proxy.upstream.host}
+// The host of the upstream
+// {http.reverse_proxy.upstream.port}
+// The port of the upstream
+// {http.reverse_proxy.upstream.requests}
+// The approximate current number of requests to the upstream
+// {http.reverse_proxy.upstream.max_requests}
+// The maximum approximate number of requests allowed to the upstream
+// {http.reverse_proxy.upstream.fails}
+// The number of recent failed requests to the upstream
+//
type Handler struct {
- TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`
- CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`
- LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
- HealthChecks *HealthChecks `json:"health_checks,omitempty"`
- Upstreams UpstreamPool `json:"upstreams,omitempty"`
- FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
- Headers *headers.Handler `json:"headers,omitempty"`
- BufferRequests bool `json:"buffer_requests,omitempty"`
+ // Configures the method of transport for the proxy. A transport
+ // is what performs the actual "round trip" to the backend.
+ // The default transport is plaintext HTTP.
+ TransportRaw json.RawMessage `json:"transport,omitempty" caddy:"namespace=http.reverse_proxy.transport inline_key=protocol"`
+
+ // A circuit breaker may be used to relieve pressure on a backend
+ // that is beginning to exhibit symptoms of stress or latency.
+ // By default, there is no circuit breaker.
+ CBRaw json.RawMessage `json:"circuit_breaker,omitempty" caddy:"namespace=http.reverse_proxy.circuit_breakers inline_key=type"`
+
+ // Load balancing distributes load/requests between backends.
+ LoadBalancing *LoadBalancing `json:"load_balancing,omitempty"`
+
+ // Health checks update the status of backends, whether they are
+ // up or down. Down backends will not be proxied to.
+ HealthChecks *HealthChecks `json:"health_checks,omitempty"`
+
+ // Upstreams is the list of backends to proxy to.
+ Upstreams UpstreamPool `json:"upstreams,omitempty"`
+
+ // TODO: figure out good defaults and write docs for this
+ // (see https://github.com/caddyserver/caddy/issues/1460)
+ FlushInterval caddy.Duration `json:"flush_interval,omitempty"`
+
+ // Headers manipulates headers between Caddy and the backend.
+ Headers *headers.Handler `json:"headers,omitempty"`
+
+ // If true, the entire request body will be read and buffered
+ // in memory before being proxied to the backend. This should
+ // be avoided if at all possible for performance reasons.
+ BufferRequests bool `json:"buffer_requests,omitempty"`
Transport http.RoundTripper `json:"-"`
CB CircuitBreaker `json:"-"`
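A sketch of a minimal JSON config for this handler, using the field names documented above; the backend addresses are hypothetical, and it assumes caddy.Duration accepts duration strings like "30s". The placeholders listed in the doc comment become available while such a handler proxies a request.

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
    )

    func main() {
        // Sketch only; addresses and durations are illustrative.
        raw := []byte(`{
            "upstreams": [{"dial": "10.0.0.1:8080"}, {"dial": "10.0.0.2:8080"}],
            "health_checks": {"passive": {"fail_duration": "30s"}},
            "buffer_requests": false
        }`)
        var h reverseproxy.Handler
        if err := json.Unmarshal(raw, &h); err != nil {
            fmt.Println("unmarshal error:", err)
            return
        }
        fmt.Printf("%d upstreams configured\n", len(h.Upstreams))
    }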
@@ -140,7 +181,7 @@ func (h *Handler) Provision(ctx caddy.Context) error {
timeout := time.Duration(h.HealthChecks.Active.Timeout)
if timeout == 0 {
- timeout = 10 * time.Second
+ timeout = 5 * time.Second
}
h.HealthChecks.Active.stopChan = make(chan struct{})
@@ -649,10 +690,30 @@ func removeConnectionHeaders(h http.Header) {
// LoadBalancing has parameters related to load balancing.
type LoadBalancing struct {
- SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
- TryDuration caddy.Duration `json:"try_duration,omitempty"`
- TryInterval caddy.Duration `json:"try_interval,omitempty"`
- RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`
+ // A selection policy is how to choose an available backend.
+ // The default policy is random selection.
+ SelectionPolicyRaw json.RawMessage `json:"selection_policy,omitempty" caddy:"namespace=http.reverse_proxy.selection_policies inline_key=policy"`
+
+ // How long to try selecting available backends for each request
+ // if the next available host is down. By default, this retry is
+ // disabled. Clients will wait for up to this long while the load
+ // balancer tries to find an available upstream host.
+ TryDuration caddy.Duration `json:"try_duration,omitempty"`
+
+ // How long to wait between selecting the next host from the pool. Default
+ // is 250ms. Only relevant when a request to an upstream host fails. Be
+ // aware that setting this to 0 with a non-zero try_duration can cause the
+ // CPU to spin if all backends are down and latency is very low.
+ TryInterval caddy.Duration `json:"try_interval,omitempty"`
+
+ // A list of matcher sets that restricts which requests may be retried.
+ // A request must match at least one of the given matcher sets in order
+ // to be retried if the connection to the upstream succeeded but the
+ // subsequent round-trip failed. If the connection to the upstream failed,
+ // a retry is always allowed. If unspecified, only GET requests will be
+ // allowed to be retried. Note that a retry is done with the next available
+ // host according to the load balancing policy.
+ RetryMatchRaw caddyhttp.RawMatcherSets `json:"retry_match,omitempty" caddy:"namespace=http.matchers"`
SelectionPolicy Selector `json:"-"`
RetryMatch caddyhttp.MatcherSets `json:"-"`
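Finally, a sketch of the load balancing fields; the round_robin policy name assumes that selection policy module is available, and the durations are illustrative (try_interval's documented default is 250ms):

    package main

    import (
        "encoding/json"
        "time"

        "github.com/caddyserver/caddy/v2"
        "github.com/caddyserver/caddy/v2/modules/caddyhttp/reverseproxy"
    )

    func main() {
        // Sketch only; illustrative values.
        _ = reverseproxy.LoadBalancing{
            // Assumes the round_robin selection policy is registered;
            // leaving this unset means random selection per the comment above.
            SelectionPolicyRaw: json.RawMessage(`{"policy": "round_robin"}`),
            TryDuration:        caddy.Duration(5 * time.Second),        // retry window (disabled by default)
            TryInterval:        caddy.Duration(250 * time.Millisecond), // documented default
        }
    }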