author	Matthew Holt <mholt@users.noreply.github.com>	2022-08-25 21:42:48 -0600
committer	Matthew Holt <mholt@users.noreply.github.com>	2022-08-25 21:42:48 -0600
commit	5fb5b81439a521294a88cf4ad6da49fdc350f3aa (patch)
tree	b0dbac5a90edc8ba3bbb87ffcfa2d9588c997794 /modules/caddyhttp/reverseproxy/caddyfile.go
parent	2cc5d38229d92838b2753fd0e67e083f70251f75 (diff)
reverseproxy: Multiple dynamic upstreams
This allows users to, for example, get upstreams from multiple SRV endpoints in order (such as primary and secondary clusters). Also, gofmt went to town on the comments, sigh
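
For illustration, a minimal Caddyfile sketch of the failover case described above, assuming hypothetical SRV record names for a primary and a secondary cluster:

	reverse_proxy {
		dynamic multi {
			# sources are consulted in the order listed
			srv _api._tcp.primary.example.internal
			srv _api._tcp.secondary.example.internal
		}
	}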
Diffstat (limited to 'modules/caddyhttp/reverseproxy/caddyfile.go')
-rw-r--r--	modules/caddyhttp/reverseproxy/caddyfile.go	257
1 file changed, 142 insertions, 115 deletions
diff --git a/modules/caddyhttp/reverseproxy/caddyfile.go b/modules/caddyhttp/reverseproxy/caddyfile.go
index a2c85f9..f746ee5 100644
--- a/modules/caddyhttp/reverseproxy/caddyfile.go
+++ b/modules/caddyhttp/reverseproxy/caddyfile.go
@@ -52,73 +52,73 @@ func parseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error)
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
-// reverse_proxy [<matcher>] [<upstreams...>] {
-// # backends
-// to <upstreams...>
-// dynamic <name> [...]
+// reverse_proxy [<matcher>] [<upstreams...>] {
+// # backends
+// to <upstreams...>
+// dynamic <name> [...]
//
-// # load balancing
-// lb_policy <name> [<options...>]
-// lb_retries <retries>
-// lb_try_duration <duration>
-// lb_try_interval <interval>
-// lb_retry_match <request-matcher>
+// # load balancing
+// lb_policy <name> [<options...>]
+// lb_retries <retries>
+// lb_try_duration <duration>
+// lb_try_interval <interval>
+// lb_retry_match <request-matcher>
//
-// # active health checking
-// health_uri <uri>
-// health_port <port>
-// health_interval <interval>
-// health_timeout <duration>
-// health_status <status>
-// health_body <regexp>
-// health_headers {
-// <field> [<values...>]
-// }
+// # active health checking
+// health_uri <uri>
+// health_port <port>
+// health_interval <interval>
+// health_timeout <duration>
+// health_status <status>
+// health_body <regexp>
+// health_headers {
+// <field> [<values...>]
+// }
//
-// # passive health checking
-// fail_duration <duration>
-// max_fails <num>
-// unhealthy_status <status>
-// unhealthy_latency <duration>
-// unhealthy_request_count <num>
+// # passive health checking
+// fail_duration <duration>
+// max_fails <num>
+// unhealthy_status <status>
+// unhealthy_latency <duration>
+// unhealthy_request_count <num>
//
-// # streaming
-// flush_interval <duration>
-// buffer_requests
-// buffer_responses
-// max_buffer_size <size>
+// # streaming
+// flush_interval <duration>
+// buffer_requests
+// buffer_responses
+// max_buffer_size <size>
//
-// # request manipulation
-// trusted_proxies [private_ranges] <ranges...>
-// header_up [+|-]<field> [<value|regexp> [<replacement>]]
-// header_down [+|-]<field> [<value|regexp> [<replacement>]]
-// method <method>
-// rewrite <to>
+// # request manipulation
+// trusted_proxies [private_ranges] <ranges...>
+// header_up [+|-]<field> [<value|regexp> [<replacement>]]
+// header_down [+|-]<field> [<value|regexp> [<replacement>]]
+// method <method>
+// rewrite <to>
//
-// # round trip
-// transport <name> {
-// ...
-// }
+// # round trip
+// transport <name> {
+// ...
+// }
//
-// # optionally intercept responses from upstream
-// @name {
-// status <code...>
-// header <field> [<value>]
-// }
-// replace_status [<matcher>] <status_code>
-// handle_response [<matcher>] {
-// <directives...>
+// # optionally intercept responses from upstream
+// @name {
+// status <code...>
+// header <field> [<value>]
+// }
+// replace_status [<matcher>] <status_code>
+// handle_response [<matcher>] {
+// <directives...>
//
-// # special directives only available in handle_response
-// copy_response [<matcher>] [<status>] {
-// status <status>
-// }
-// copy_response_headers [<matcher>] {
-// include <fields...>
-// exclude <fields...>
-// }
-// }
-// }
+// # special directives only available in handle_response
+// copy_response [<matcher>] [<status>] {
+// status <status>
+// }
+// copy_response_headers [<matcher>] {
+// include <fields...>
+// exclude <fields...>
+// }
+// }
+// }
//
// Proxy upstream addresses should be network dial addresses such
// as `host:port`, or a URL such as `scheme://host:port`. Scheme
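
For illustration, a sketch that ties together the syntax documented in the hunk above; the matcher path, backend addresses, and health-check values are illustrative and not part of this change:

	reverse_proxy /api/* node1:8080 node2:8080 {
		lb_policy first
		lb_try_duration 5s
		health_uri /healthz
		health_interval 10s
	}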
@@ -824,33 +824,32 @@ func (h *Handler) FinalizeUnmarshalCaddyfile(helper httpcaddyfile.Helper) error
// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
//
-// transport http {
-// read_buffer <size>
-// write_buffer <size>
-// max_response_header <size>
-// dial_timeout <duration>
-// dial_fallback_delay <duration>
-// response_header_timeout <duration>
-// expect_continue_timeout <duration>
-// resolvers <resolvers...>
-// tls
-// tls_client_auth <automate_name> | <cert_file> <key_file>
-// tls_insecure_skip_verify
-// tls_timeout <duration>
-// tls_trusted_ca_certs <cert_files...>
-// tls_server_name <sni>
-// tls_renegotiation <level>
-// tls_except_ports <ports...>
-// keepalive [off|<duration>]
-// keepalive_interval <interval>
-// keepalive_idle_conns <max_count>
-// keepalive_idle_conns_per_host <count>
-// versions <versions...>
-// compression off
-// max_conns_per_host <count>
-// max_idle_conns_per_host <count>
-// }
-//
+// transport http {
+// read_buffer <size>
+// write_buffer <size>
+// max_response_header <size>
+// dial_timeout <duration>
+// dial_fallback_delay <duration>
+// response_header_timeout <duration>
+// expect_continue_timeout <duration>
+// resolvers <resolvers...>
+// tls
+// tls_client_auth <automate_name> | <cert_file> <key_file>
+// tls_insecure_skip_verify
+// tls_timeout <duration>
+// tls_trusted_ca_certs <cert_files...>
+// tls_server_name <sni>
+// tls_renegotiation <level>
+// tls_except_ports <ports...>
+// keepalive [off|<duration>]
+// keepalive_interval <interval>
+// keepalive_idle_conns <max_count>
+// keepalive_idle_conns_per_host <count>
+// versions <versions...>
+// compression off
+// max_conns_per_host <count>
+// max_idle_conns_per_host <count>
+// }
func (h *HTTPTransport) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
for d.NextBlock(0) {
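
A hedged sketch of the transport block documented above; the backend address and option values are illustrative:

	reverse_proxy https://backend.example.com {
		transport http {
			dial_timeout 5s
			response_header_timeout 30s
			tls_server_name backend.example.com
			versions 1.1 2
		}
	}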
@@ -1138,10 +1137,9 @@ func parseCopyResponseCaddyfile(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHan
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
-// copy_response [<matcher>] [<status>] {
-// status <status>
-// }
-//
+// copy_response [<matcher>] [<status>] {
+// status <status>
+// }
func (h *CopyResponseHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
args := d.RemainingArgs()
@@ -1178,11 +1176,10 @@ func parseCopyResponseHeadersCaddyfile(h httpcaddyfile.Helper) (caddyhttp.Middle
// UnmarshalCaddyfile sets up the handler from Caddyfile tokens. Syntax:
//
-// copy_response_headers [<matcher>] {
-// include <fields...>
-// exclude <fields...>
-// }
-//
+// copy_response_headers [<matcher>] {
+// include <fields...>
+// exclude <fields...>
+// }
func (h *CopyResponseHeadersHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
args := d.RemainingArgs()
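
A hedged sketch of response interception using the two handlers above together with handle_response; the backend address, matcher name, header field, and status codes are illustrative:

	reverse_proxy backend:9000 {
		@error status 500 502
		handle_response @error {
			copy_response_headers {
				include X-Request-Id
			}
			respond "upstream error" 502
		}
	}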
@@ -1208,16 +1205,15 @@ func (h *CopyResponseHeadersHandler) UnmarshalCaddyfile(d *caddyfile.Dispenser)
// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
//
-// dynamic srv [<name>] {
-// service <service>
-// proto <proto>
-// name <name>
-// refresh <interval>
-// resolvers <resolvers...>
-// dial_timeout <timeout>
-// dial_fallback_delay <timeout>
-// }
-//
+// dynamic srv [<name>] {
+// service <service>
+// proto <proto>
+// name <name>
+// refresh <interval>
+// resolvers <resolvers...>
+// dial_timeout <timeout>
+// dial_fallback_delay <timeout>
+// }
func (u *SRVUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
args := d.RemainingArgs()
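
A hedged sketch of the srv source on its own, following the syntax documented above; the service, proto, zone name, and intervals are hypothetical:

	reverse_proxy {
		dynamic srv {
			service api
			proto tcp
			name example.internal
			refresh 30s
			dial_timeout 2s
		}
	}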
@@ -1307,15 +1303,14 @@ func (u *SRVUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
//
-// dynamic a [<name> <port>] {
-// name <name>
-// port <port>
-// refresh <interval>
-// resolvers <resolvers...>
-// dial_timeout <timeout>
-// dial_fallback_delay <timeout>
-// }
-//
+//	dynamic a [<name> <port>] {
+// name <name>
+// port <port>
+// refresh <interval>
+// resolvers <resolvers...>
+// dial_timeout <timeout>
+// dial_fallback_delay <timeout>
+// }
func (u *AUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
for d.Next() {
args := d.RemainingArgs()
@@ -1324,7 +1319,9 @@ func (u *AUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
}
if len(args) > 0 {
u.Name = args[0]
- u.Port = args[1]
+ if len(args) == 2 {
+ u.Port = args[1]
+ }
}
for d.NextBlock(0) {
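
A hedged sketch of the a source, reflecting the fix above that makes the inline port argument optional; the hostname, port, and refresh interval are illustrative:

	reverse_proxy {
		dynamic a app.example.internal 8080
	}

	# equivalent, with the port given in the block instead
	reverse_proxy {
		dynamic a app.example.internal {
			port 8080
			refresh 1m
		}
	}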
@@ -1395,6 +1392,35 @@ func (u *AUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
return nil
}
+// UnmarshalCaddyfile deserializes Caddyfile tokens into h.
+//
+// dynamic multi {
+// <source> [...]
+// }
+func (u *MultiUpstreams) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
+ for d.Next() {
+ if d.NextArg() {
+ return d.ArgErr()
+ }
+
+ for nesting := d.Nesting(); d.NextBlock(nesting); {
+ dynModule := d.Val()
+ modID := "http.reverse_proxy.upstreams." + dynModule
+ unm, err := caddyfile.UnmarshalModule(d, modID)
+ if err != nil {
+ return err
+ }
+ source, ok := unm.(UpstreamSource)
+ if !ok {
+ return d.Errf("module %s (%T) is not an UpstreamSource", modID, unm)
+ }
+ u.SourcesRaw = append(u.SourcesRaw, caddyconfig.JSONModuleObject(source, "source", dynModule, nil))
+ }
+ }
+
+ return nil
+}
+
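
A hedged sketch showing that sources of different types can be mixed inside dynamic multi, since any module in the http.reverse_proxy.upstreams namespace that implements UpstreamSource qualifies; the record names and port are hypothetical:

	reverse_proxy {
		dynamic multi {
			srv _api._tcp.cluster.example.internal
			a fallback.example.internal 8080
		}
	}

Per the loop above, each source line is unmarshaled by the named module and appended to SourcesRaw as a JSON module object keyed by "source".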
const matcherPrefix = "@"
// Interface guards
@@ -1403,4 +1429,5 @@ var (
_ caddyfile.Unmarshaler = (*HTTPTransport)(nil)
_ caddyfile.Unmarshaler = (*SRVUpstreams)(nil)
_ caddyfile.Unmarshaler = (*AUpstreams)(nil)
+ _ caddyfile.Unmarshaler = (*MultiUpstreams)(nil)
)