Diffstat (limited to 'caddyconfig')
-rw-r--r--  caddyconfig/caddyfile/adapter.go               8
-rwxr-xr-x  caddyconfig/caddyfile/dispenser.go            30
-rwxr-xr-x  caddyconfig/caddyfile/parse.go               112
-rwxr-xr-x  caddyconfig/caddyfile/parse_test.go          181
-rw-r--r--  caddyconfig/httpcaddyfile/addresses.go       162
-rw-r--r--  caddyconfig/httpcaddyfile/addresses_test.go  121
-rw-r--r--  caddyconfig/httpcaddyfile/builtins.go        356
-rw-r--r--  caddyconfig/httpcaddyfile/directives.go      182
-rw-r--r--  caddyconfig/httpcaddyfile/handlers.go         36
-rw-r--r--  caddyconfig/httpcaddyfile/httptype.go        369
10 files changed, 845 insertions(+), 712 deletions(-)
diff --git a/caddyconfig/caddyfile/adapter.go b/caddyconfig/caddyfile/adapter.go
index ab4905a..377f77b 100644
--- a/caddyconfig/caddyfile/adapter.go
+++ b/caddyconfig/caddyfile/adapter.go
@@ -37,14 +37,12 @@ func (a Adapter) Adapt(body []byte, options map[string]string) ([]byte, []caddyconfig.Warning, error) {
options = make(map[string]string)
}
- directives := a.ServerType.ValidDirectives()
-
filename := options["filename"]
if filename == "" {
filename = "Caddyfile"
}
- serverBlocks, err := Parse(filename, bytes.NewReader(body), directives)
+ serverBlocks, err := Parse(filename, bytes.NewReader(body))
if err != nil {
return nil, nil, err
}
@@ -77,10 +75,6 @@ type Unmarshaler interface {
// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
type ServerType interface {
- // ValidDirectives returns a list of the
- // server type's recognized directives.
- ValidDirectives() []string
-
// Setup takes the server blocks which
// contain tokens, as well as options
// (e.g. CLI flags) and creates a Caddy
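
With the directive whitelist gone, callers hand Parse only a filename and a reader; rejecting unknown directives becomes the server type's job. A minimal sketch of the new call, using a made-up Caddyfile input:

package main

import (
    "fmt"
    "strings"

    "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
    // hypothetical input; Parse no longer checks directive names
    input := strings.NewReader("example.com\nroot /var/www/html")
    blocks, err := caddyfile.Parse("Caddyfile", input)
    if err != nil {
        panic(err)
    }
    for _, b := range blocks {
        fmt.Println(b.Keys, len(b.Segments)) // [example.com] 1
    }
}
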
diff --git a/caddyconfig/caddyfile/dispenser.go b/caddyconfig/caddyfile/dispenser.go
index 1cf5d04..0d2c789 100755
--- a/caddyconfig/caddyfile/dispenser.go
+++ b/caddyconfig/caddyfile/dispenser.go
@@ -31,6 +31,7 @@ type Dispenser struct {
}
// NewDispenser returns a Dispenser filled with the given tokens.
+// TODO: Get rid of the filename argument; it seems pointless here
func NewDispenser(filename string, tokens []Token) *Dispenser {
return &Dispenser{
filename: filename,
@@ -51,15 +52,15 @@ func (d *Dispenser) Next() bool {
}
// Prev moves to the previous token. It does the inverse
-// of Next(). Generally, this should only be used in
-// special cases such as deleting a token from the slice
-// that d is iterating. In that case, without using Prev(),
-// the dispenser would be pointing at the wrong token since
-// deleting a token implicitly advances the cursor.
+// of Next(), except this function may decrement the cursor
+// to -1 so that the next call to Next() points to the
+// first token; this allows dispensing to "start over". This
+// method returns true if the cursor ends up pointing to a
+// valid token.
func (d *Dispenser) Prev() bool {
- if d.cursor > 0 {
+ if d.cursor > -1 {
d.cursor--
- return true
+ return d.cursor > -1
}
return false
}
@@ -223,8 +224,7 @@ func (d *Dispenser) RemainingArgs() []string {
// "directive" whether that be to the end of the line or
// the end of a block that starts at the end of the line.
func (d *Dispenser) NewFromNextTokens() *Dispenser {
- var tkns []Token
- tkns = append(tkns, d.Token())
+ tkns := []Token{d.Token()}
for d.NextArg() {
tkns = append(tkns, d.Token())
}
@@ -245,10 +245,14 @@ func (d *Dispenser) NewFromNextTokens() *Dispenser {
// Token returns the current token.
func (d *Dispenser) Token() Token {
- if d.cursor < 0 || d.cursor >= len(d.tokens) {
+ return d.TokenAt(d.cursor)
+}
+
+func (d *Dispenser) TokenAt(cursor int) Token {
+ if cursor < 0 || cursor >= len(d.tokens) {
return Token{}
}
- return d.tokens[d.cursor]
+ return d.tokens[cursor]
}
// Cursor returns the current cursor (token index).
@@ -256,6 +260,10 @@ func (d *Dispenser) Cursor() int {
return d.cursor
}
+func (d *Dispenser) Reset() {
+ d.cursor = -1
+}
+
// ArgErr returns an argument error, meaning that another
// argument was expected but not found. In other words,
// a line break or open curly brace was encountered instead of
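
A quick sketch of the reworked cursor methods: Reset rewinds the dispenser, TokenAt peeks without moving the cursor, and Prev can now step back past the first token. The tokens below are built by hand purely for illustration:

package main

import (
    "fmt"

    "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
    tokens := []caddyfile.Token{{Text: "dir1"}, {Text: "arg1"}, {Text: "arg2"}}
    d := caddyfile.NewDispenser("Caddyfile", tokens)

    for d.Next() {
        fmt.Println(d.Val()) // dir1, arg1, arg2
    }

    d.Reset() // cursor back to -1, as if the dispenser were brand new
    d.Next()  // points at "dir1" again

    // TokenAt peeks at any index without moving the cursor; out-of-range
    // indexes yield a zero Token instead of panicking.
    fmt.Println(d.TokenAt(2).Text) // arg2

    // Prev can now rewind to -1; it reports whether the cursor still
    // points at a valid token afterwards.
    fmt.Println(d.Prev()) // false: cursor moved from the first token back to -1
}
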
diff --git a/caddyconfig/caddyfile/parse.go b/caddyconfig/caddyfile/parse.go
index cc91e3d..e5b25fc 100755
--- a/caddyconfig/caddyfile/parse.go
+++ b/caddyconfig/caddyfile/parse.go
@@ -28,12 +28,12 @@ import (
// Directives that do not appear in validDirectives will cause
// an error. If you do not want to check for valid directives,
// pass in nil instead.
-func Parse(filename string, input io.Reader, validDirectives []string) ([]ServerBlock, error) {
+func Parse(filename string, input io.Reader) ([]ServerBlock, error) {
tokens, err := allTokens(input)
if err != nil {
return nil, err
}
- p := parser{Dispenser: NewDispenser(filename, tokens), validDirectives: validDirectives}
+ p := parser{Dispenser: NewDispenser(filename, tokens)}
return p.parseAll()
}
@@ -56,9 +56,9 @@ func allTokens(input io.Reader) ([]Token, error) {
type parser struct {
*Dispenser
block ServerBlock // current server block being parsed
- validDirectives []string // a directive must be valid or it's an error
eof bool // if we encounter a valid EOF in a hard place
definedSnippets map[string][]Token
+ nesting int
}
func (p *parser) parseAll() ([]ServerBlock, error) {
@@ -72,14 +72,16 @@ func (p *parser) parseAll() ([]ServerBlock, error) {
if len(p.block.Keys) > 0 {
blocks = append(blocks, p.block)
}
+ if p.nesting > 0 {
+ return blocks, p.EOFErr()
+ }
}
return blocks, nil
}
func (p *parser) parseOne() error {
- p.block = ServerBlock{Tokens: make(map[string][]Token)}
-
+ p.block = ServerBlock{}
return p.begin()
}
@@ -186,7 +188,7 @@ func (p *parser) blockContents() error {
return err
}
- // Only look for close curly brace if there was an opening
+ // only look for close curly brace if there was an opening
if errOpenCurlyBrace == nil {
err = p.closeCurlyBrace()
if err != nil {
@@ -205,6 +207,7 @@ func (p *parser) directives() error {
for p.Next() {
// end of server block
if p.Val() == "}" {
+ // p.nesting has already been decremented
break
}
@@ -218,11 +221,15 @@ func (p *parser) directives() error {
continue
}
- // normal case: parse a directive on this line
+ // normal case: parse a directive as a new segment
+ // (a "segment" is a line which starts with a directive
+ // and which ends at the end of the line or at the end of
+ // the block that is opened at the end of the line)
if err := p.directive(); err != nil {
return err
}
}
+
return nil
}
@@ -345,25 +352,24 @@ func (p *parser) doSingleImport(importFile string) ([]Token, error) {
// are loaded into the current server block for later use
// by directive setup functions.
func (p *parser) directive() error {
- dir := replaceEnvVars(p.Val())
- nesting := 0
+ // evaluate any env vars in directive token
+ p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text)
- if !p.validDirective(dir) {
- return p.Errf("Unknown directive '%s'", dir)
- }
+ // a segment is a list of tokens associated with this directive
+ var segment Segment
- // The directive itself is appended as a relevant token
- p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor])
+ // the directive itself is appended as a relevant token
+ segment = append(segment, p.Token())
for p.Next() {
if p.Val() == "{" {
- nesting++
- } else if p.isNewLine() && nesting == 0 {
+ p.nesting++
+ } else if p.isNewLine() && p.nesting == 0 {
p.cursor-- // read too far
break
- } else if p.Val() == "}" && nesting > 0 {
- nesting--
- } else if p.Val() == "}" && nesting == 0 {
+ } else if p.Val() == "}" && p.nesting > 0 {
+ p.nesting--
+ } else if p.Val() == "}" && p.nesting == 0 {
return p.Err("Unexpected '}' because no matching opening brace")
} else if p.Val() == "import" && p.isNewLine() {
if err := p.doImport(); err != nil {
@@ -373,12 +379,15 @@ func (p *parser) directive() error {
continue
}
p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text)
- p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor])
+ segment = append(segment, p.Token())
}
- if nesting > 0 {
+ p.block.Segments = append(p.block.Segments, segment)
+
+ if p.nesting > 0 {
return p.EOFErr()
}
+
return nil
}
@@ -404,19 +413,6 @@ func (p *parser) closeCurlyBrace() error {
return nil
}
-// validDirective returns true if dir is in p.validDirectives.
-func (p *parser) validDirective(dir string) bool {
- if p.validDirectives == nil {
- return true
- }
- for _, d := range p.validDirectives {
- if d == dir {
- return true
- }
- }
- return false
-}
-
// replaceEnvVars replaces environment variables that appear in the token
// and understands both the $UNIX and %WINDOWS% syntaxes.
func replaceEnvVars(s string) string {
@@ -447,13 +443,6 @@ func replaceEnvReferences(s, refStart, refEnd string) string {
return s
}
-// ServerBlock associates any number of keys (usually addresses
-// of some sort) with tokens (grouped by directive name).
-type ServerBlock struct {
- Keys []string
- Tokens map[string][]Token
-}
-
func (p *parser) isSnippet() (bool, string) {
keys := p.block.Keys
// A snippet block is a single key with parens. Nothing else qualifies.
@@ -480,6 +469,7 @@ func (p *parser) snippetTokens() ([]Token, error) {
}
}
if p.Val() == "{" {
+ p.nesting++
count++
}
tokens = append(tokens, p.tokens[p.cursor])
@@ -490,3 +480,43 @@ func (p *parser) snippetTokens() ([]Token, error) {
}
return tokens, nil
}
+
+// ServerBlock associates any number of keys from the
+// head of the server block with tokens, which are
+// grouped by segments.
+type ServerBlock struct {
+ Keys []string
+ Segments []Segment
+}
+
+// DispenseDirective returns a dispenser containing the tokens
+// of all segments in the server block that begin with dir.
+func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
+ var tokens []Token
+ for _, seg := range sb.Segments {
+ if len(seg) > 0 && seg[0].Text == dir {
+ tokens = append(tokens, seg...)
+ }
+ }
+ return NewDispenser("", tokens)
+}
+
+// Segment is a list of tokens which begins with a directive
+// and ends at the end of the directive (either at the end of
+// the line, or at the end of a block it opens).
+type Segment []Token
+
+// Directive returns the directive name for the segment.
+// The directive name is the text of the first token.
+func (s Segment) Directive() string {
+ if len(s) > 0 {
+ return s[0].Text
+ }
+ return ""
+}
+
+// NewDispenser returns a dispenser for this
+// segment's tokens.
+func (s Segment) NewDispenser() *Dispenser {
+ return NewDispenser("", s)
+}
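
Downstream code now iterates Segments in order instead of looking tokens up in a directive-keyed map. A rough sketch of both access patterns, with a hypothetical input:

package main

import (
    "fmt"
    "strings"

    "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
    input := "example.com {\n\troot /srv\n\tbind 127.0.0.1\n}"
    blocks, err := caddyfile.Parse("Caddyfile", strings.NewReader(input))
    if err != nil {
        panic(err)
    }

    // each directive line, plus any block it opens, is one Segment
    for _, seg := range blocks[0].Segments {
        d := seg.NewDispenser()
        d.Next() // position on the directive name
        fmt.Println(seg.Directive(), d.RemainingArgs()) // root [/srv], then bind [127.0.0.1]
    }

    // or gather every segment for one directive into a single dispenser
    bindDisp := blocks[0].DispenseDirective("bind")
    for bindDisp.Next() {
        fmt.Println(bindDisp.Val())
    }
}
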
diff --git a/caddyconfig/caddyfile/parse_test.go b/caddyconfig/caddyfile/parse_test.go
index 654c68d..19959de 100755
--- a/caddyconfig/caddyfile/parse_test.go
+++ b/caddyconfig/caddyfile/parse_test.go
@@ -22,6 +22,8 @@ import (
"testing"
)
+// TODO: re-enable all tests
+
func TestAllTokens(t *testing.T) {
input := strings.NewReader("a b c\nd e")
expected := []string{"a", "b", "c", "d", "e"}
@@ -53,84 +55,67 @@ func TestParseOneAndImport(t *testing.T) {
input string
shouldErr bool
keys []string
- tokens map[string]int // map of directive name to number of tokens expected
+ numTokens []int // number of tokens to expect in each segment
}{
{`localhost`, false, []string{
"localhost",
- }, map[string]int{}},
+ }, []int{}},
{`localhost
dir1`, false, []string{
"localhost",
- }, map[string]int{
- "dir1": 1,
- }},
+ }, []int{1}},
{`localhost:1234
dir1 foo bar`, false, []string{
"localhost:1234",
- }, map[string]int{
- "dir1": 3,
- }},
+ }, []int{3},
+ },
{`localhost {
dir1
}`, false, []string{
"localhost",
- }, map[string]int{
- "dir1": 1,
- }},
+ }, []int{1}},
{`localhost:1234 {
dir1 foo bar
dir2
}`, false, []string{
"localhost:1234",
- }, map[string]int{
- "dir1": 3,
- "dir2": 1,
- }},
+ }, []int{3, 1}},
{`http://localhost https://localhost
dir1 foo bar`, false, []string{
"http://localhost",
"https://localhost",
- }, map[string]int{
- "dir1": 3,
- }},
+ }, []int{3}},
{`http://localhost https://localhost {
dir1 foo bar
}`, false, []string{
"http://localhost",
"https://localhost",
- }, map[string]int{
- "dir1": 3,
- }},
+ }, []int{3}},
{`http://localhost, https://localhost {
dir1 foo bar
}`, false, []string{
"http://localhost",
"https://localhost",
- }, map[string]int{
- "dir1": 3,
- }},
+ }, []int{3}},
{`http://localhost, {
}`, true, []string{
"http://localhost",
- }, map[string]int{}},
+ }, []int{}},
{`host1:80, http://host2.com
dir1 foo bar
dir2 baz`, false, []string{
"host1:80",
"http://host2.com",
- }, map[string]int{
- "dir1": 3,
- "dir2": 2,
- }},
+ }, []int{3, 2}},
{`http://host1.com,
http://host2.com,
@@ -138,7 +123,7 @@ func TestParseOneAndImport(t *testing.T) {
"http://host1.com",
"http://host2.com",
"https://host3.com",
- }, map[string]int{}},
+ }, []int{}},
{`http://host1.com:1234, https://host2.com
dir1 foo {
@@ -147,10 +132,7 @@ func TestParseOneAndImport(t *testing.T) {
dir2`, false, []string{
"http://host1.com:1234",
"https://host2.com",
- }, map[string]int{
- "dir1": 6,
- "dir2": 1,
- }},
+ }, []int{6, 1}},
{`127.0.0.1
dir1 {
@@ -160,34 +142,25 @@ func TestParseOneAndImport(t *testing.T) {
foo bar
}`, false, []string{
"127.0.0.1",
- }, map[string]int{
- "dir1": 5,
- "dir2": 5,
- }},
+ }, []int{5, 5}},
{`localhost
dir1 {
foo`, true, []string{
"localhost",
- }, map[string]int{
- "dir1": 3,
- }},
+ }, []int{3}},
{`localhost
dir1 {
}`, false, []string{
"localhost",
- }, map[string]int{
- "dir1": 3,
- }},
+ }, []int{3}},
{`localhost
dir1 {
} }`, true, []string{
"localhost",
- }, map[string]int{
- "dir1": 3,
- }},
+ }, []int{}},
{`localhost
dir1 {
@@ -197,48 +170,38 @@ func TestParseOneAndImport(t *testing.T) {
}
dir2 foo bar`, false, []string{
"localhost",
- }, map[string]int{
- "dir1": 7,
- "dir2": 3,
- }},
+ }, []int{7, 3}},
- {``, false, []string{}, map[string]int{}},
+ {``, false, []string{}, []int{}},
{`localhost
dir1 arg1
import testdata/import_test1.txt`, false, []string{
"localhost",
- }, map[string]int{
- "dir1": 2,
- "dir2": 3,
- "dir3": 1,
- }},
+ }, []int{2, 3, 1}},
{`import testdata/import_test2.txt`, false, []string{
"host1",
- }, map[string]int{
- "dir1": 1,
- "dir2": 2,
- }},
+ }, []int{1, 2}},
- {`import testdata/import_test1.txt testdata/import_test2.txt`, true, []string{}, map[string]int{}},
+ {`import testdata/import_test1.txt testdata/import_test2.txt`, true, []string{}, []int{}},
- {`import testdata/not_found.txt`, true, []string{}, map[string]int{}},
+ {`import testdata/not_found.txt`, true, []string{}, []int{}},
- {`""`, false, []string{}, map[string]int{}},
+ {`""`, false, []string{}, []int{}},
- {``, false, []string{}, map[string]int{}},
+ {``, false, []string{}, []int{}},
// test cases found by fuzzing!
- {`import }{$"`, true, []string{}, map[string]int{}},
- {`import /*/*.txt`, true, []string{}, map[string]int{}},
- {`import /???/?*?o`, true, []string{}, map[string]int{}},
- {`import /??`, true, []string{}, map[string]int{}},
- {`import /[a-z]`, true, []string{}, map[string]int{}},
- {`import {$}`, true, []string{}, map[string]int{}},
- {`import {%}`, true, []string{}, map[string]int{}},
- {`import {$$}`, true, []string{}, map[string]int{}},
- {`import {%%}`, true, []string{}, map[string]int{}},
+ {`import }{$"`, true, []string{}, []int{}},
+ {`import /*/*.txt`, true, []string{}, []int{}},
+ {`import /???/?*?o`, true, []string{}, []int{}},
+ {`import /??`, true, []string{}, []int{}},
+ {`import /[a-z]`, true, []string{}, []int{}},
+ {`import {$}`, true, []string{}, []int{}},
+ {`import {%}`, true, []string{}, []int{}},
+ {`import {$$}`, true, []string{}, []int{}},
+ {`import {%%}`, true, []string{}, []int{}},
} {
result, err := testParseOne(test.input)
@@ -261,15 +224,16 @@ func TestParseOneAndImport(t *testing.T) {
}
}
- if len(result.Tokens) != len(test.tokens) {
- t.Errorf("Test %d: Expected %d directives, had %d",
- i, len(test.tokens), len(result.Tokens))
+ if len(result.Segments) != len(test.numTokens) {
+ t.Errorf("Test %d: Expected %d segments, had %d",
+ i, len(test.numTokens), len(result.Segments))
continue
}
- for directive, tokens := range result.Tokens {
- if len(tokens) != test.tokens[directive] {
- t.Errorf("Test %d, directive '%s': Expected %d tokens, counted %d",
- i, directive, test.tokens[directive], len(tokens))
+
+ for j, seg := range result.Segments {
+ if len(seg) != test.numTokens[j] {
+ t.Errorf("Test %d, segment %d: Expected %d tokens, counted %d",
+ i, j, test.numTokens[j], len(seg))
continue
}
}
@@ -289,12 +253,12 @@ func TestRecursiveImport(t *testing.T) {
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
return false
}
- if len(got.Tokens) != 2 {
- t.Errorf("got wrong number of tokens: expect 2, got %d", len(got.Tokens))
+ if len(got.Segments) != 2 {
+ t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments))
return false
}
- if len(got.Tokens["dir1"]) != 1 || len(got.Tokens["dir2"]) != 2 {
- t.Errorf("got unexpect tokens: %v", got.Tokens)
+ if len(got.Segments[0]) != 1 || len(got.Segments[1]) != 2 {
+ t.Errorf("got unexpect tokens: %v", got.Segments)
return false
}
return true
@@ -384,12 +348,12 @@ func TestDirectiveImport(t *testing.T) {
t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
return false
}
- if len(got.Tokens) != 2 {
- t.Errorf("got wrong number of tokens: expect 2, got %d", len(got.Tokens))
+ if len(got.Segments) != 2 {
+ t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments))
return false
}
- if len(got.Tokens["dir1"]) != 1 || len(got.Tokens["proxy"]) != 8 {
- t.Errorf("got unexpect tokens: %v", got.Tokens)
+ if len(got.Segments[0]) != 1 || len(got.Segments[1]) != 8 {
+ t.Errorf("got unexpect tokens: %v", got.Segments)
return false
}
return true
@@ -557,21 +521,21 @@ func TestEnvironmentReplacement(t *testing.T) {
if actual, expected := blocks[0].Keys[0], ":8080"; expected != actual {
t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
}
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "foobar"; expected != actual {
+ if actual, expected := blocks[0].Segments[0][1].Text, "foobar"; expected != actual {
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
}
// combined windows env vars in argument
p = testParser(":{%PORT%}\ndir1 {%ADDRESS%}/{%FOOBAR%}")
blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "servername.com/foobar"; expected != actual {
+ if actual, expected := blocks[0].Segments[0][1].Text, "servername.com/foobar"; expected != actual {
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
}
// malformed env var (windows)
p = testParser(":1234\ndir1 {%ADDRESS}")
blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "{%ADDRESS}"; expected != actual {
+ if actual, expected := blocks[0].Segments[0][1].Text, "{%ADDRESS}"; expected != actual {
t.Errorf("Expected host to be '%s' but was '%s'", expected, actual)
}
@@ -585,22 +549,18 @@ func TestEnvironmentReplacement(t *testing.T) {
// in quoted field
p = testParser(":1234\ndir1 \"Test {$FOOBAR} test\"")
blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Tokens["dir1"][1].Text, "Test foobar test"; expected != actual {
+ if actual, expected := blocks[0].Segments[0][1].Text, "Test foobar test"; expected != actual {
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
}
// after end token
p = testParser(":1234\nanswer \"{{ .Name }} {$FOOBAR}\"")
blocks, _ = p.parseAll()
- if actual, expected := blocks[0].Tokens["answer"][1].Text, "{{ .Name }} foobar"; expected != actual {
+ if actual, expected := blocks[0].Segments[0][1].Text, "{{ .Name }} foobar"; expected != actual {
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
}
}
-func testParser(input string) parser {
- return parser{Dispenser: newTestDispenser(input)}
-}
-
func TestSnippets(t *testing.T) {
p := testParser(`
(common) {
@@ -617,7 +577,7 @@ func TestSnippets(t *testing.T) {
}
for _, b := range blocks {
t.Log(b.Keys)
- t.Log(b.Tokens)
+ t.Log(b.Segments)
}
if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
@@ -625,16 +585,15 @@ func TestSnippets(t *testing.T) {
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
}
- if len(blocks[0].Tokens) != 2 {
- t.Fatalf("Server block should have tokens from import")
+ if len(blocks[0].Segments) != 2 {
+ t.Fatalf("Server block should have tokens from import, got: %+v", blocks[0])
}
- if actual, expected := blocks[0].Tokens["gzip"][0].Text, "gzip"; expected != actual {
+ if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual {
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
}
- if actual, expected := blocks[0].Tokens["errors"][1].Text, "stderr"; expected != actual {
+ if actual, expected := blocks[0].Segments[1][1].Text, "stderr"; expected != actual {
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
}
-
}
func writeStringToTempFileOrDie(t *testing.T, str string) (pathToFile string) {
@@ -666,9 +625,9 @@ func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
}
for _, b := range blocks {
t.Log(b.Keys)
- t.Log(b.Tokens)
+ t.Log(b.Segments)
}
- auth := blocks[0].Tokens["basicauth"]
+ auth := blocks[0].Segments[0]
line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text
if line != "basicauth / import password" {
// Previously, it would be changed to:
@@ -701,7 +660,7 @@ func TestSnippetAcrossMultipleFiles(t *testing.T) {
}
for _, b := range blocks {
t.Log(b.Keys)
- t.Log(b.Tokens)
+ t.Log(b.Segments)
}
if len(blocks) != 1 {
t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
@@ -709,10 +668,14 @@ func TestSnippetAcrossMultipleFiles(t *testing.T) {
if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
}
- if len(blocks[0].Tokens) != 1 {
+ if len(blocks[0].Segments) != 1 {
t.Fatalf("Server block should have tokens from import")
}
- if actual, expected := blocks[0].Tokens["gzip"][0].Text, "gzip"; expected != actual {
+ if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual {
t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
}
}
+
+func testParser(input string) parser {
+ return parser{Dispenser: newTestDispenser(input)}
+}
diff --git a/caddyconfig/httpcaddyfile/addresses.go b/caddyconfig/httpcaddyfile/addresses.go
index 6ecee26..2adb818 100644
--- a/caddyconfig/httpcaddyfile/addresses.go
+++ b/caddyconfig/httpcaddyfile/addresses.go
@@ -22,7 +22,7 @@ import (
"strconv"
"strings"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/mholt/certmagic"
)
@@ -73,8 +73,8 @@ import (
// repetition may be undesirable, so call consolidateAddrMappings() to map
// multiple addresses to the same lists of server blocks (a many:many mapping).
// (Doing this is essentially a map-reduce technique.)
-func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []caddyfile.ServerBlock) (map[string][]caddyfile.ServerBlock, error) {
- sbmap := make(map[string][]caddyfile.ServerBlock)
+func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock) (map[string][]serverBlock, error) {
+ sbmap := make(map[string][]serverBlock)
for i, sblock := range originalServerBlocks {
// within a server block, we need to map all the listener addresses
@@ -83,7 +83,7 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []caddyfile.ServerBlock) (map[string][]caddyfile.ServerBlock, error) {
// key of a server block as its own, but without having to repeat its
// contents in cases where multiple keys really can be served together
addrToKeys := make(map[string][]string)
- for j, key := range sblock.Keys {
+ for j, key := range sblock.block.Keys {
// a key can have multiple listener addresses if there are multiple
// arguments to the 'bind' directive (although they will all have
// the same port, since the port is defined by the key or is implicit
@@ -105,9 +105,12 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []caddyfile.ServerBlock) (map[string][]caddyfile.ServerBlock, error) {
// server block are only the ones which use the address; but
// the contents (tokens) are of course the same
for addr, keys := range addrToKeys {
- sbmap[addr] = append(sbmap[addr], caddyfile.ServerBlock{
- Keys: keys,
- Tokens: sblock.Tokens,
+ sbmap[addr] = append(sbmap[addr], serverBlock{
+ block: caddyfile.ServerBlock{
+ Keys: keys,
+ Segments: sblock.block.Segments,
+ },
+ pile: sblock.pile,
})
}
}
@@ -123,7 +126,7 @@ func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []caddyfile.ServerBlock) (map[string][]caddyfile.ServerBlock, error) {
// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
// association from multiple addresses to multiple server blocks; i.e. each element of
// the returned slice) becomes a server definition in the output JSON.
-func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]caddyfile.ServerBlock) []sbAddrAssociation {
+func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]serverBlock) []sbAddrAssociation {
var sbaddrs []sbAddrAssociation
for addr, sblocks := range addrToServerBlocks {
// we start with knowing that at least this address
@@ -151,11 +154,12 @@ func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]caddyfile.ServerBlock) []sbAddrAssociation {
return sbaddrs
}
-func (st *ServerType) listenerAddrsForServerBlockKey(sblock caddyfile.ServerBlock, key string) ([]string, error) {
- addr, err := standardizeAddress(key)
+func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string) ([]string, error) {
+ addr, err := ParseAddress(key)
if err != nil {
return nil, fmt.Errorf("parsing key: %v", err)
}
+ addr = addr.Normalize()
lnPort := defaultPort
if addr.Port != "" {
@@ -168,11 +172,8 @@ func (st *ServerType) listenerAddrsForServerBlockKey(sblock caddyfile.ServerBlock, key string) ([]string, error) {
// the bind directive specifies hosts, but is optional
var lnHosts []string
- for i, token := range sblock.Tokens["bind"] {
- if i == 0 {
- continue
- }
- lnHosts = append(lnHosts, token.Text)
+ for _, cfgVal := range sblock.pile["bind"] {
+ lnHosts = append(lnHosts, cfgVal.Value.([]string)...)
}
if len(lnHosts) == 0 {
lnHosts = []string{""}
@@ -205,7 +206,53 @@ type Address struct {
Original, Scheme, Host, Port, Path string
}
-// String returns a human-friendly print of the address.
+// ParseAddress parses an address string into a structured format with separate
+// scheme, host, port, and path portions, as well as the original input string.
+func ParseAddress(str string) (Address, error) {
+ httpPort, httpsPort := strconv.Itoa(certmagic.HTTPPort), strconv.Itoa(certmagic.HTTPSPort)
+
+ input := str
+
+ // Split input into components (prepend with // to force host portion by default)
+ if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") {
+ str = "//" + str
+ }
+
+ u, err := url.Parse(str)
+ if err != nil {
+ return Address{}, err
+ }
+
+ // separate host and port
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host, port, err = net.SplitHostPort(u.Host + ":")
+ if err != nil {
+ host = u.Host
+ }
+ }
+
+ // see if we can set port based off scheme
+ if port == "" {
+ if u.Scheme == "http" {
+ port = httpPort
+ } else if u.Scheme == "https" {
+ port = httpsPort
+ }
+ }
+
+ // error if scheme and port combination violate convention
+ if (u.Scheme == "http" && port == httpsPort) || (u.Scheme == "https" && port == httpPort) {
+ return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input)
+ }
+
+ return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err
+}
+
+// TODO: which of the methods on Address are even used?
+
+// String returns a human-readable form of a. It will
+// be a cleaned-up and filled-out URL string.
func (a Address) String() string {
if a.Host == "" && a.Port == "" {
return ""
@@ -235,16 +282,7 @@ func (a Address) String() string {
return s
}
-// VHost returns a sensible concatenation of Host:Port/Path from a.
-// It's basically the a.Original but without the scheme.
-func (a Address) VHost() string {
- if idx := strings.Index(a.Original, "://"); idx > -1 {
- return a.Original[idx+3:]
- }
- return a.Original
-}
-
-// Normalize normalizes URL: turn scheme and host names into lower case
+// Normalize returns a normalized version of a.
func (a Address) Normalize() Address {
path := a.Path
if !caseSensitivePath {
@@ -266,8 +304,8 @@ func (a Address) Normalize() Address {
}
}
-// Key is similar to String, just replaces scheme and host values with modified values.
-// Unlike String it doesn't add anything default (scheme, port, etc)
+// Key returns a string form of a, much like String() does, but this
+// method doesn't add anything default that wasn't in the original.
func (a Address) Key() string {
res := ""
if a.Scheme != "" {
@@ -276,11 +314,11 @@ func (a Address) Key() string {
if a.Host != "" {
res += a.Host
}
- if a.Port != "" {
- if strings.HasPrefix(a.Original[len(res):], ":"+a.Port) {
- // insert port only if the original has its own explicit port
- res += ":" + a.Port
- }
+ // insert port only if the original has its own explicit port
+ if a.Port != "" &&
+ len(a.Original) >= len(res) &&
+ strings.HasPrefix(a.Original[len(res):], ":"+a.Port) {
+ res += ":" + a.Port
}
if a.Path != "" {
res += a.Path
@@ -288,63 +326,7 @@ func (a Address) Key() string {
return res
}
-// standardizeAddress parses an address string into a structured format with separate
-// scheme, host, port, and path portions, as well as the original input string.
-func standardizeAddress(str string) (Address, error) {
- httpPort, httpsPort := strconv.Itoa(certmagic.HTTPPort), strconv.Itoa(certmagic.HTTPSPort)
-
- input := str
-
- // Split input into components (prepend with // to assert host by default)
- if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") {
- str = "//" + str
- }
- u, err := url.Parse(str)
- if err != nil {
- return Address{}, err
- }
-
- // separate host and port
- host, port, err := net.SplitHostPort(u.Host)
- if err != nil {
- host, port, err = net.SplitHostPort(u.Host + ":")
- if err != nil {
- host = u.Host
- }
- }
-
- // see if we can set port based off scheme
- if port == "" {
- if u.Scheme == "http" {
- port = httpPort
- } else if u.Scheme == "https" {
- port = httpsPort
- }
- }
-
- // repeated or conflicting scheme is confusing, so error
- if u.Scheme != "" && (port == "http" || port == "https") {
- return Address{}, fmt.Errorf("[%s] scheme specified twice in address", input)
- }
-
- // error if scheme and port combination violate convention
- if (u.Scheme == "http" && port == httpsPort) || (u.Scheme == "https" && port == httpPort) {
- return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input)
- }
-
- // standardize http and https ports to their respective port numbers
- if port == "http" {
- u.Scheme = "http"
- port = httpPort
- } else if port == "https" {
- u.Scheme = "https"
- port = httpsPort
- }
-
- return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err
-}
-
const (
defaultPort = "2015"
- caseSensitivePath = false
+ caseSensitivePath = false // TODO: Used?
)
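
ParseAddress replaces the unexported standardizeAddress, and normalization is now a separate, explicit step. A small sketch reusing an input from the tests below:

package main

import (
    "fmt"

    "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
    addr, err := httpcaddyfile.ParseAddress("HTTP://A/ABCDEF")
    if err != nil {
        panic(err)
    }

    // Normalize lower-cases the address parts (including the path while
    // caseSensitivePath is false); Key renders only what the original
    // address made explicit, so no default port is inserted here.
    fmt.Println(addr.Normalize().Key()) // http://a/abcdef, per TestKeyNormalization
}
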
diff --git a/caddyconfig/httpcaddyfile/addresses_test.go b/caddyconfig/httpcaddyfile/addresses_test.go
index 7e03d29..d6aa6f6 100644
--- a/caddyconfig/httpcaddyfile/addresses_test.go
+++ b/caddyconfig/httpcaddyfile/addresses_test.go
@@ -1,22 +1,11 @@
-// Copyright 2015 Matthew Holt and The Caddy Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
package httpcaddyfile
-import "testing"
+import (
+ "strings"
+ "testing"
+)
-func TestStandardizeAddress(t *testing.T) {
+func TestParseAddress(t *testing.T) {
for i, test := range []struct {
input string
scheme, host, port, path string
@@ -31,14 +20,15 @@ func TestStandardizeAddress(t *testing.T) {
{`[::1]`, "", "::1", "", "", false},
{`[::1]:1234`, "", "::1", "1234", "", false},
{`:`, "", "", "", "", false},
- {`localhost:http`, "http", "localhost", "80", "", false},
- {`localhost:https`, "https", "localhost", "443", "", false},
- {`:http`, "http", "", "80", "", false},
- {`:https`, "https", "", "443", "", false},
+ {`:http`, "", "", "", "", true},
+ {`:https`, "", "", "", "", true},
+ {`localhost:http`, "", "", "", "", true}, // using service name in port is verboten, as of Go 1.12.8
+ {`localhost:https`, "", "", "", "", true},
{`http://localhost:https`, "", "", "", "", true}, // conflict
{`http://localhost:http`, "", "", "", "", true}, // repeated scheme
- {`http://localhost:443`, "", "", "", "", true}, // not conventional
- {`https://localhost:80`, "", "", "", "", true}, // not conventional
+ {`host:https/path`, "", "", "", "", true},
+ {`http://localhost:443`, "", "", "", "", true}, // not conventional
+ {`https://localhost:80`, "", "", "", "", true}, // not conventional
{`http://localhost`, "http", "localhost", "80", "", false},
{`https://localhost`, "https", "localhost", "443", "", false},
{`http://127.0.0.1`, "http", "127.0.0.1", "80", "", false},
@@ -58,10 +48,9 @@ func TestStandardizeAddress(t *testing.T) {
{`http://host/path`, "http", "host", "80", "/path", false},
{`https://host:443/path/foo`, "https", "host", "443", "/path/foo", false},
{`host:80/path`, "", "host", "80", "/path", false},
- {`host:https/path`, "https", "host", "443", "/path", false},
{`/path`, "", "", "", "/path", false},
} {
- actual, err := standardizeAddress(test.input)
+ actual, err := ParseAddress(test.input)
if err != nil && !test.shouldErr {
t.Errorf("Test %d (%s): Expected no error, but had error: %v", i, test.input, err)
@@ -88,24 +77,6 @@ func TestStandardizeAddress(t *testing.T) {
}
}
-func TestAddressVHost(t *testing.T) {
- for i, test := range []struct {
- addr Address
- expected string
- }{
- {Address{Original: "host:1234"}, "host:1234"},
- {Address{Original: "host:1234/foo"}, "host:1234/foo"},
- {Address{Original: "host/foo"}, "host/foo"},
- {Address{Original: "http://host/foo"}, "host/foo"},
- {Address{Original: "https://host/foo"}, "host/foo"},
- } {
- actual := test.addr.VHost()
- if actual != test.expected {
- t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
- }
- }
-}
-
func TestAddressString(t *testing.T) {
for i, test := range []struct {
addr Address
@@ -127,3 +98,69 @@ func TestAddressString(t *testing.T) {
}
}
}
+
+func TestKeyNormalization(t *testing.T) {
+ testCases := []struct {
+ input string
+ expect string
+ }{
+ {
+ input: "http://host:1234/path",
+ expect: "http://host:1234/path",
+ },
+ {
+ input: "HTTP://A/ABCDEF",
+ expect: "http://a/ABCDEF",
+ },
+ {
+ input: "A/ABCDEF",
+ expect: "a/ABCDEF",
+ },
+ {
+ input: "A:2015/Path",
+ expect: "a:2015/Path",
+ },
+ {
+ input: ":80",
+ expect: ":80",
+ },
+ {
+ input: ":443",
+ expect: ":443",
+ },
+ {
+ input: ":1234",
+ expect: ":1234",
+ },
+ {
+ input: "",
+ expect: "",
+ },
+ {
+ input: ":",
+ expect: "",
+ },
+ {
+ input: "[::]",
+ expect: "::",
+ },
+ }
+ for i, tc := range testCases {
+ addr, err := ParseAddress(tc.input)
+ if err != nil {
+ t.Errorf("Test %d: Parsing address '%s': %v", i, tc.input, err)
+ continue
+ }
+ expect := tc.expect
+ if !caseSensitivePath {
+ // every other part of the address should be lowercased when normalized,
+ // so simply lower-case the whole thing to do case-insensitive comparison
+ // of the path as well
+ expect = strings.ToLower(expect)
+ }
+ if actual := addr.Normalize().Key(); actual != expect {
+ t.Errorf("Test %d: Normalized key for address '%s' was '%s' but expected '%s'", i, tc.input, actual, expect)
+ }
+
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/builtins.go b/caddyconfig/httpcaddyfile/builtins.go
index 7e51e46..0fdfcd5 100644
--- a/caddyconfig/httpcaddyfile/builtins.go
+++ b/caddyconfig/httpcaddyfile/builtins.go
@@ -19,239 +19,237 @@ import (
"fmt"
"html"
"net/http"
+ "reflect"
- "github.com/caddyserver/caddy/v2/caddyconfig"
- "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
- "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/caddyconfig"
+ "github.com/caddyserver/caddy/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
)
-func (st *ServerType) parseRoot(
- tkns []caddyfile.Token,
- matcherDefs map[string]map[string]json.RawMessage,
- warnings *[]caddyconfig.Warning,
-) ([]caddyhttp.Route, error) {
- var routes []caddyhttp.Route
-
- matchersAndTokens, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings)
- if err != nil {
- return nil, err
- }
-
- for _, mst := range matchersAndTokens {
- d := caddyfile.NewDispenser("Caddyfile", mst.tokens)
-
- var root string
- for d.Next() {
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- root = d.Val()
- if d.NextArg() {
- return nil, d.ArgErr()
- }
- }
-
- varsHandler := caddyhttp.VarsMiddleware{"root": root}
- route := caddyhttp.Route{
- Handle: []json.RawMessage{
- caddyconfig.JSONModuleObject(varsHandler, "handler", "vars", warnings),
- },
- }
- if mst.matcherSet != nil {
- route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet}
- }
+func init() {
+ RegisterDirective("bind", parseBind)
+ RegisterDirective("root", parseRoot)
+ RegisterDirective("tls", parseTLS)
+ RegisterHandlerDirective("redir", parseRedir)
+}
- routes = append(routes, route)
+func parseBind(h Helper) ([]ConfigValue, error) {
+ var lnHosts []string
+ for h.Next() {
+ lnHosts = append(lnHosts, h.RemainingArgs()...)
}
-
- return routes, nil
+ return h.NewBindAddresses(lnHosts), nil
}
-func (st *ServerType) parseRedir(
- tkns []caddyfile.Token,
- matcherDefs map[string]map[string]json.RawMessage,
- warnings *[]caddyconfig.Warning,
-) ([]caddyhttp.Route, error) {
- var routes []caddyhttp.Route
+func parseRoot(h Helper) ([]ConfigValue, error) {
+ if !h.Next() {
+ return nil, h.ArgErr()
+ }
- matchersAndTokens, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings)
+ matcherSet, ok, err := h.MatcherToken()
if err != nil {
return nil, err
}
-
- for _, mst := range matchersAndTokens {
- var route caddyhttp.Route
-
- d := caddyfile.NewDispenser("Caddyfile", mst.tokens)
-
- for d.Next() {
- if !d.NextArg() {
- return nil, d.ArgErr()
- }
- to := d.Val()
-
- var code string
- if d.NextArg() {
- code = d.Val()
- }
- if code == "permanent" {
- code = "301"
- }
- if code == "temporary" || code == "" {
- code = "307"
- }
- var body string
- if code == "meta" {
- // Script tag comes first since that will better imitate a redirect in the browser's
- // history, but the meta tag is a fallback for most non-JS clients.
- const metaRedir = `<!DOCTYPE html>
-<html>
- <head>
- <title>Redirecting...</title>
- <script>window.location.replace("%s");</script>
- <meta http-equiv="refresh" content="0; URL='%s'">
- </head>
- <body>Redirecting to <a href="%s">%s</a>...</body>
-</html>
-`
- safeTo := html.EscapeString(to)
- body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
- }
-
- handler := caddyhttp.StaticResponse{
- StatusCode: caddyhttp.WeakString(code),
- Headers: http.Header{"Location": []string{to}},
- Body: body,
- }
-
- route.Handle = append(route.Handle,
- caddyconfig.JSONModuleObject(handler, "handler", "static_response", warnings))
- }
-
- if mst.matcherSet != nil {
- route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet}
- }
-
- routes = append(routes, route)
+ if !ok {
+ // no matcher token; oops
+ h.Dispenser.Prev()
}
- return routes, nil
-}
-
-func (st *ServerType) parseTLSAutomationManager(d *caddyfile.Dispenser) (caddytls.ACMEManagerMaker, error) {
- var m caddytls.ACMEManagerMaker
-
- for d.Next() {
- firstLine := d.RemainingArgs()
- if len(firstLine) == 1 && firstLine[0] != "off" {
- m.Email = firstLine[0]
- }
-
- var hasBlock bool
- for d.NextBlock() {
- hasBlock = true
- switch d.Val() {
- case "ca":
- arg := d.RemainingArgs()
- if len(arg) != 1 {
- return m, d.ArgErr()
- }
- m.CA = arg[0]
- // TODO: other properties
- }
- }
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ root := h.Val()
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
- // a naked tls directive is not allowed
- if len(firstLine) == 0 && !hasBlock {
- return m, d.ArgErr()
- }
+ varsHandler := caddyhttp.VarsMiddleware{"root": root}
+ route := caddyhttp.Route{
+ HandlersRaw: []json.RawMessage{
+ caddyconfig.JSONModuleObject(varsHandler, "handler", "vars", nil),
+ },
+ }
+ if matcherSet != nil {
+ route.MatcherSetsRaw = []map[string]json.RawMessage{matcherSet}
}
- return m, nil
+ return h.NewVarsRoute(route), nil
}
-func (st *ServerType) parseTLSCerts(d *caddyfile.Dispenser) (map[string]caddytls.CertificateLoader, error) {
+func parseTLS(h Helper) ([]ConfigValue, error) {
+ var configVals []ConfigValue
+
+ cp := new(caddytls.ConnectionPolicy)
var fileLoader caddytls.FileLoader
var folderLoader caddytls.FolderLoader
-
- for d.Next() {
- // file loader
- firstLine := d.RemainingArgs()
- if len(firstLine) == 2 {
+ var mgr caddytls.ACMEManagerMaker
+ var off bool
+
+ for h.Next() {
+ // file certificate loader
+ firstLine := h.RemainingArgs()
+ switch len(firstLine) {
+ case 0:
+ case 1:
+ if firstLine[0] == "off" {
+ off = true
+ } else {
+ mgr.Email = firstLine[0]
+ }
+ case 2:
fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
Certificate: firstLine[0],
Key: firstLine[1],
- // TODO: tags, for enterprise module's certificate selection
+ // TODO: add tags, for enterprise module's certificate selection
})
+ default:
+ return nil, h.ArgErr()
}
- // folder loader
- for d.NextBlock() {
- if d.Val() == "load" {
- folderLoader = append(folderLoader, d.RemainingArgs()...)
- }
- }
- }
-
- // put configured loaders into the map
- loaders := make(map[string]caddytls.CertificateLoader)
- if len(fileLoader) > 0 {
- loaders["load_files"] = fileLoader
- }
- if len(folderLoader) > 0 {
- loaders["load_folders"] = folderLoader
- }
-
- return loaders, nil
-}
+ var hasBlock bool
+ for h.NextBlock() {
+ hasBlock = true
-func (st *ServerType) parseTLSConnPolicy(d *caddyfile.Dispenser) (*caddytls.ConnectionPolicy, error) {
- cp := new(caddytls.ConnectionPolicy)
+ switch h.Val() {
- for d.Next() {
- for d.NextBlock() {
- switch d.Val() {
+ // connection policy
case "protocols":
- args := d.RemainingArgs()
+ args := h.RemainingArgs()
if len(args) == 0 {
- return nil, d.SyntaxErr("one or two protocols")
+ return nil, h.SyntaxErr("one or two protocols")
}
if len(args) > 0 {
if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
- return nil, d.Errf("Wrong protocol name or protocol not supported: '%s'", args[0])
+ return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[0])
}
cp.ProtocolMin = args[0]
}
if len(args) > 1 {
if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
- return nil, d.Errf("Wrong protocol name or protocol not supported: '%s'", args[1])
+ return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[1])
}
cp.ProtocolMax = args[1]
}
case "ciphers":
- for d.NextArg() {
- if _, ok := caddytls.SupportedCipherSuites[d.Val()]; !ok {
- return nil, d.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", d.Val())
+ for h.NextArg() {
+ if _, ok := caddytls.SupportedCipherSuites[h.Val()]; !ok {
+ return nil, h.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
}
- cp.CipherSuites = append(cp.CipherSuites, d.Val())
+ cp.CipherSuites = append(cp.CipherSuites, h.Val())
}
case "curves":
- for d.NextArg() {
- if _, ok := caddytls.SupportedCurves[d.Val()]; !ok {
- return nil, d.Errf("Wrong curve name or curve not supported: '%s'", d.Val())
+ for h.NextArg() {
+ if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
+ return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
}
- cp.Curves = append(cp.Curves, d.Val())
+ cp.Curves = append(cp.Curves, h.Val())
}
case "alpn":
- args := d.RemainingArgs()
+ args := h.RemainingArgs()
if len(args) == 0 {
- return nil, d.ArgErr()
+ return nil, h.ArgErr()
}
cp.ALPN = args
+
+ // certificate folder loader
+ case "load":
+ folderLoader = append(folderLoader, h.RemainingArgs()...)
+
+ // automation policy
+ case "ca":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ mgr.CA = arg[0]
+
+ // TODO: other properties for automation manager
}
}
+
+ // a naked tls directive is not allowed
+ if len(firstLine) == 0 && !hasBlock {
+ return nil, h.ArgErr()
+ }
+ }
+
+ // connection policy
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.connection_policy",
+ Value: cp,
+ })
+
+ // certificate loaders
+ if len(fileLoader) > 0 {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.certificate_loader",
+ Value: fileLoader,
+ })
+ }
+ if len(folderLoader) > 0 {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.certificate_loader",
+ Value: folderLoader,
+ })
+ }
+
+ // automation policy
+ if off {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.off",
+ Value: true,
+ })
+ } else if !reflect.DeepEqual(mgr, caddytls.ACMEManagerMaker{}) {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.automation_manager",
+ Value: mgr,
+ })
+ }
+
+ return configVals, nil
+}
+
+func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ if !h.Next() {
+ return nil, h.ArgErr()
+ }
+
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ to := h.Val()
+
+ var code string
+ if h.NextArg() {
+ code = h.Val()
+ }
+ if code == "permanent" {
+ code = "301"
+ }
+ if code == "temporary" || code == "" {
+ code = "307"
+ }
+ var body string
+ if code == "meta" {
+ // Script tag comes first since that will better imitate a redirect in the browser's
+ // history, but the meta tag is a fallback for most non-JS clients.
+ const metaRedir = `<!DOCTYPE html>
+<html>
+ <head>
+ <title>Redirecting...</title>
+ <script>window.location.replace("%s");</script>
+ <meta http-equiv="refresh" content="0; URL='%s'">
+ </head>
+ <body>Redirecting to <a href="%s">%s</a>...</body>
+</html>
+`
+ safeTo := html.EscapeString(to)
+ body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
}
- return cp, nil
+ return caddyhttp.StaticResponse{
+ StatusCode: caddyhttp.WeakString(code),
+ Headers: http.Header{"Location": []string{to}},
+ Body: body,
+ }, nil
}
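
Directive functions now return ConfigValue entries instead of building routes themselves; Setup files each entry into the server block's pile by class, and later stages pull them back out. A sketch of that round trip for a hypothetical `bind 127.0.0.1 10.0.0.1` line; this helper is not part of the commit and would live inside package httpcaddyfile:

// pileRoundTrip mimics what parseBind, Setup, and
// listenerAddrsForServerBlockKey (addresses.go above) do with one bind line.
func pileRoundTrip(sblock serverBlock) []string {
    if sblock.pile == nil {
        sblock.pile = make(map[string][]ConfigValue)
    }

    // what parseBind returns for `bind 127.0.0.1 10.0.0.1`
    vals := []ConfigValue{{Class: "bind", Value: []string{"127.0.0.1", "10.0.0.1"}}}

    // Setup appends each result to the pile under its class
    sblock.pile["bind"] = append(sblock.pile["bind"], vals...)

    // the address mapping later reads the same class back out
    var lnHosts []string
    for _, cfgVal := range sblock.pile["bind"] {
        lnHosts = append(lnHosts, cfgVal.Value.([]string)...)
    }
    return lnHosts // ["127.0.0.1", "10.0.0.1"]
}
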
diff --git a/caddyconfig/httpcaddyfile/directives.go b/caddyconfig/httpcaddyfile/directives.go
new file mode 100644
index 0000000..526ac87
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/directives.go
@@ -0,0 +1,182 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// defaultDirectiveOrder specifies the order
+// to apply directives in HTTP routes.
+// TODO: finish the ability to customize this
+var defaultDirectiveOrder = []string{
+ "rewrite",
+ "try_files",
+ "headers",
+ "encode",
+ "templates",
+ "redir",
+ "static_response", // TODO: "reply" or "respond"?
+ "reverse_proxy",
+ "file_server",
+}
+
+// RegisterDirective registers a unique directive dir with an
+// associated unmarshaling (setup) function. When directive dir
+// is encountered in a Caddyfile, setupFunc will be called to
+// unmarshal its tokens.
+func RegisterDirective(dir string, setupFunc UnmarshalFunc) {
+ if _, ok := registeredDirectives[dir]; ok {
+ panic("directive " + dir + " already registered")
+ }
+ registeredDirectives[dir] = setupFunc
+}
+
+// RegisterHandlerDirective is like RegisterDirective, but for
+// directives which specifically output only an HTTP handler.
+func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
+ RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) {
+ if !h.Next() {
+ return nil, h.ArgErr()
+ }
+
+ matcherSet, ok, err := h.MatcherToken()
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ h.Dispenser.Delete() // strip matcher token
+ }
+
+ h.Dispenser.Reset() // pretend this lookahead never happened
+ val, err := setupFunc(h)
+ if err != nil {
+ return nil, err
+ }
+
+ return h.NewRoute(matcherSet, val), nil
+ })
+}
+
+// Helper is a type which helps setup a value from
+// Caddyfile tokens.
+type Helper struct {
+ *caddyfile.Dispenser
+ warnings *[]caddyconfig.Warning
+ matcherDefs map[string]map[string]json.RawMessage
+}
+
+// JSON converts val into JSON. Any errors are added to warnings.
+func (h Helper) JSON(val interface{}, warnings *[]caddyconfig.Warning) json.RawMessage {
+ return caddyconfig.JSON(val, h.warnings)
+}
+
+// MatcherToken assumes the current token is (possibly) a matcher, and
+// if so, returns the matcher set along with a true value. If the current
+// token is not a matcher, nil and false is returned. Note that a true
+// value may be returned with a nil matcher set if it is a catch-all.
+func (h Helper) MatcherToken() (map[string]json.RawMessage, bool, error) {
+ if !h.NextArg() {
+ return nil, false, nil
+ }
+ return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
+}
+
+// NewRoute returns config values relevant to creating a new HTTP route.
+func (h Helper) NewRoute(matcherSet map[string]json.RawMessage,
+ handler caddyhttp.MiddlewareHandler) []ConfigValue {
+ mod, err := caddy.GetModule(caddy.GetModuleName(handler))
+ if err != nil {
+ // TODO: append to warnings
+ }
+ var matcherSetsRaw []map[string]json.RawMessage
+ if matcherSet != nil {
+ matcherSetsRaw = append(matcherSetsRaw, matcherSet)
+ }
+ return []ConfigValue{
+ {
+ Class: "route",
+ Value: caddyhttp.Route{
+ MatcherSetsRaw: matcherSetsRaw,
+ HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID(), h.warnings)},
+ },
+ },
+ }
+}
+
+// NewBindAddresses returns config values relevant to adding
+// listener bind addresses to the config.
+func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
+ return []ConfigValue{{Class: "bind", Value: addrs}}
+}
+
+// NewVarsRoute returns config values relevant to adding a
+// "vars" wrapper route to the config.
+func (h Helper) NewVarsRoute(route caddyhttp.Route) []ConfigValue {
+ return []ConfigValue{{Class: "var", Value: route}}
+}
+
+// ConfigValue represents a value to be added to the final
+// configuration, or a value to be consulted when building
+// the final configuration.
+type ConfigValue struct {
+ // The kind of value this is. As the config is
+ // being built, the adapter will look in the
+ // "pile" for values belonging to a certain
+ // class when it is setting up a certain part
+ // of the config. The associated value will be
+ // type-asserted and placed accordingly.
+ Class string
+
+ // The value to be used when building the config.
+ // Generally its type is associated with the
+ // name of the Class.
+ Value interface{}
+
+ directive string
+}
+
+// serverBlock pairs a Caddyfile server block
+// with a "pile" of config values, keyed by class
+// name.
+type serverBlock struct {
+ block caddyfile.ServerBlock
+ pile map[string][]ConfigValue // config values obtained from directives
+}
+
+type (
+ // UnmarshalFunc is a function which can unmarshal Caddyfile
+ // tokens into zero or more config values using a Helper type.
+ // These are passed in a call to RegisterDirective.
+ UnmarshalFunc func(h Helper) ([]ConfigValue, error)
+
+ // UnmarshalHandlerFunc is like UnmarshalFunc, except the
+ // output of the unmarshaling is an HTTP handler. This
+ // function does not need to deal with HTTP request matching
+ // which is abstracted away. Since writing HTTP handlers
+ // with Caddyfile support is very common, this is a more
+ // convenient way to add a handler to the chain since a lot
+ // of the details common to HTTP handlers are taken care of
+ // for you. These are passed to a call to
+ // RegisterHandlerDirective.
+ UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
+)
+
+var registeredDirectives = make(map[string]UnmarshalFunc)
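
A sketch of how a third-party handler could plug into this registration API; the hello package, the `hello` directive, and HelloHandler are all hypothetical and not part of this commit:

package hello

import (
    "net/http"

    "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
    "github.com/caddyserver/caddy/v2/modules/caddyhttp"
)

func init() {
    // panics if another plugin already claimed "hello"
    httpcaddyfile.RegisterHandlerDirective("hello", parseHello)
}

// parseHello unmarshals a hypothetical directive of the form
//     hello <greeting>
// The wrapper installed by RegisterHandlerDirective strips any leading
// matcher token and wraps the returned handler in a route, so this
// function only consumes its own tokens.
func parseHello(h httpcaddyfile.Helper) (caddyhttp.MiddlewareHandler, error) {
    if !h.Next() {
        return nil, h.ArgErr()
    }
    if !h.NextArg() {
        return nil, h.ArgErr()
    }
    return HelloHandler{Greeting: h.Val()}, nil
}

// HelloHandler is a stand-in middleware for this sketch only; a real
// plugin would also register it as a caddy module so the adapter can
// look up its module ID when building the route.
type HelloHandler struct {
    Greeting string `json:"greeting,omitempty"`
}

func (hh HelloHandler) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
    if _, err := w.Write([]byte(hh.Greeting)); err != nil {
        return err
    }
    return next.ServeHTTP(w, r)
}
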
diff --git a/caddyconfig/httpcaddyfile/handlers.go b/caddyconfig/httpcaddyfile/handlers.go
index a90aa4a..9a29e97 100644
--- a/caddyconfig/httpcaddyfile/handlers.go
+++ b/caddyconfig/httpcaddyfile/handlers.go
@@ -17,7 +17,6 @@ package httpcaddyfile
import (
"encoding/json"
"fmt"
- "log"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
@@ -55,38 +54,3 @@ func (st *ServerType) parseMatcherDefinitions(d *caddyfile.Dispenser) (map[strin
}
return matchers, nil
}
-
-// directiveBuckets returns a list of middleware/handler directives.
-// Buckets are ordered, and directives should be evaluated in their
-// bucket order. Within a bucket, directives are not ordered. Hence,
-// the return value has a slice of buckets, where each bucket is a
-// map, which is a strongly-typed reminder that directives within a
-// bucket are not ordered.
-func directiveBuckets() []map[string]struct{} {
- directiveBuckets := []map[string]struct{}{
- // prefer odd-numbered buckets; evens are there for contingencies
- {}, // 0
- {}, // 1 - keep empty unless necessary
- {}, // 2
- {}, // 3 - first handlers, last responders
- {}, // 4
- {}, // 5 - middle of chain
- {}, // 6
- {}, // 7 - last handlers, first responders
- {}, // 8
- {}, // 9 - keep empty unless necessary
- {}, // 10
- }
- for _, mod := range caddy.GetModules("http.handlers") {
- if hd, ok := mod.New().(HandlerDirective); ok {
- bucket := hd.Bucket()
- if bucket < 0 || bucket >= len(directiveBuckets) {
- log.Printf("[ERROR] directive %s: bucket out of range [0-%d): %d; skipping",
- mod.Name, len(directiveBuckets), bucket)
- continue
- }
- directiveBuckets[bucket][mod.ID()] = struct{}{}
- }
- }
- return directiveBuckets
-}
diff --git a/caddyconfig/httpcaddyfile/httptype.go b/caddyconfig/httpcaddyfile/httptype.go
index e5bf048..1c12ccd 100644
--- a/caddyconfig/httpcaddyfile/httptype.go
+++ b/caddyconfig/httpcaddyfile/httptype.go
@@ -17,17 +17,18 @@ package httpcaddyfile
import (
"encoding/json"
"fmt"
+ "log"
"reflect"
+ "sort"
"strconv"
"strings"
- "github.com/mholt/certmagic"
-
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/caddy/v2/modules/caddyhttp"
"github.com/caddyserver/caddy/v2/modules/caddytls"
+ "github.com/mholt/certmagic"
)
func init() {
@@ -38,24 +39,57 @@ func init() {
type ServerType struct {
}
-// ValidDirectives returns the list of known directives.
-func (ServerType) ValidDirectives() []string {
- dirs := []string{"matcher", "root", "tls", "redir"} // TODO: put special-case (hard-coded, or non-handler) directives here
- for _, mod := range caddy.GetModules("http.handlers") {
- if _, ok := mod.New().(HandlerDirective); ok {
- dirs = append(dirs, mod.ID())
- }
- }
- return dirs
-}
+// TODO: error on unrecognized directives
// Setup makes a config from the tokens.
func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock,
options map[string]string) (*caddy.Config, []caddyconfig.Warning, error) {
var warnings []caddyconfig.Warning
+ var serverBlocks []serverBlock
+ for _, sblock := range originalServerBlocks {
+ serverBlocks = append(serverBlocks, serverBlock{
+ block: sblock,
+ pile: make(map[string][]ConfigValue),
+ })
+ }
+
+ for _, sb := range serverBlocks {
+ // extract matcher definitions
+ d := sb.block.DispenseDirective("matcher")
+ matcherDefs, err := st.parseMatcherDefinitions(d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, segment := range sb.block.Segments {
+ dir := segment.Directive()
+ if dir == "matcher" {
+ // TODO: This is a special case because we pre-processed it; handle this better
+ continue
+ }
+ if dirFunc, ok := registeredDirectives[dir]; ok {
+ results, err := dirFunc(Helper{
+ Dispenser: segment.NewDispenser(),
+ warnings: &warnings,
+ matcherDefs: matcherDefs,
+ })
+ if err != nil {
+ return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
+ }
+ for _, result := range results {
+ result.directive = dir
+ sb.pile[result.Class] = append(sb.pile[result.Class], result)
+ }
+ } else {
+ // TODO: this should be an error
+ log.Printf("%s not registered", dir)
+ }
+ }
+ }
+
// map
- sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks)
+ sbmap, err := st.mapAddressToServerBlocks(serverBlocks)
if err != nil {
return nil, warnings, err
}
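Each registered directive is called with a Helper wrapping that segment's dispenser and returns zero or more ConfigValues; Setup stamps each result with its directive name and piles it by Class, and later stages drain classes such as "route", "var", and the "tls.*" classes. A hedged sketch of one such directive function (the "hello" directive is made up; only the Helper/ConfigValue shapes and the "route" class come from this diff):

// parseHello is a hypothetical directive function for a directive "hello".
func parseHello(h Helper) ([]ConfigValue, error) {
	// a real implementation would consume its tokens from h.Dispenser and
	// encode a handler module into route.HandlersRaw
	var route caddyhttp.Route
	return []ConfigValue{
		{Class: "route", Value: route},
	}, nil
}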
@@ -63,6 +97,22 @@ func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock,
// reduce
pairings := st.consolidateAddrMappings(sbmap)
+ // TODO: shorthand placeholders
+ // for _, p := range pairings {
+ // for _, sblock := range p.serverBlocks {
+ // for _, tokens := range sblock.Tokens {
+ // for i := 0; i < len(tokens); i++ {
+ // switch tokens[i].Text {
+ // case "{uri}":
+ // tokens[i].Text = "{http.request.uri}"
+ // case "{path}":
+ // tokens[i].Text = "{http.request.uri.path}"
+ // }
+ // }
+ // }
+ // }
+ // }
+
// each pairing of listener addresses to list of server
// blocks is basically a server definition
servers, err := st.serversFromPairings(pairings, &warnings)
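The commented-out block above sketches shorthand placeholder expansion. If enabled, a token-rewriting pass along these lines would do it (a sketch derived only from that commented code; where the token slices live in the new serverBlock type is not settled here):

// replaceShorthandPlaceholders rewrites user-friendly placeholder shorthands
// into their canonical forms before directives are parsed.
func replaceShorthandPlaceholders(tokens []caddyfile.Token) {
	for i := range tokens {
		switch tokens[i].Text {
		case "{uri}":
			tokens[i].Text = "{http.request.uri}"
		case "{path}":
			tokens[i].Text = "{http.request.uri.path}"
		}
	}
}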
@@ -81,45 +131,33 @@ func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock,
tlsApp := caddytls.TLS{Certificates: make(map[string]json.RawMessage)}
for _, p := range pairings {
for _, sblock := range p.serverBlocks {
- if tkns, ok := sblock.Tokens["tls"]; ok {
- // extract all unique hostnames from the server block
- // keys, then convert to a slice for use in the TLS app
- hostMap := make(map[string]struct{})
- for _, sblockKey := range sblock.Keys {
- addr, err := standardizeAddress(sblockKey)
+ // tls automation policies
+ if mmVals, ok := sblock.pile["tls.automation_manager"]; ok {
+ for _, mmVal := range mmVals {
+ mm := mmVal.Value.(caddytls.ManagerMaker)
+ sblockHosts, err := st.autoHTTPSHosts(sblock)
if err != nil {
- return nil, warnings, fmt.Errorf("parsing server block key: %v", err)
+ return nil, warnings, err
}
- hostMap[addr.Host] = struct{}{}
- }
- sblockHosts := make([]string, 0, len(hostMap))
- for host := range hostMap {
- sblockHosts = append(sblockHosts, host)
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, caddytls.AutomationPolicy{
+ Hosts: sblockHosts,
+ ManagementRaw: caddyconfig.JSONModuleObject(mm, "module", mm.(caddy.Module).CaddyModule().ID(), &warnings),
+ })
}
+ }
- // parse tokens to get ACME manager config
- acmeMgr, err := st.parseTLSAutomationManager(caddyfile.NewDispenser("Caddyfile", tkns))
- if err != nil {
- return nil, warnings, err
- }
-
- tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, caddytls.AutomationPolicy{
- Hosts: sblockHosts,
- ManagementRaw: caddyconfig.JSONModuleObject(acmeMgr, "module", "acme", &warnings),
- })
-
- // parse tokens to get certificates to be loaded manually
- certLoaders, err := st.parseTLSCerts(caddyfile.NewDispenser("Caddyfile", tkns))
- if err != nil {
- return nil, nil, err
- }
- for loaderName, loader := range certLoaders {
+ // tls certificate loaders
+ if clVals, ok := sblock.pile["tls.certificate_loader"]; ok {
+ for _, clVal := range clVals {
+ loader := clVal.Value.(caddytls.CertificateLoader)
+ loaderName := caddy.GetModuleName(loader)
tlsApp.Certificates[loaderName] = caddyconfig.JSON(loader, &warnings)
}
-
}
}
}
+ // consolidate automation policies that are the exact same
+ tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
// and now the top-level config, then we're done!
cfg := &caddy.Config{AppsRaw: make(map[string]json.RawMessage)}
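For a single server block keyed example.com whose tls directive contributed one automation manager, the loops above would assemble roughly this TLS app value (illustrative only; the "acme" module name is carried over from the code this replaces, and the literal JSON is an assumption):

tlsApp := caddytls.TLS{
	Certificates: make(map[string]json.RawMessage),
}
tlsApp.Automation.Policies = []caddytls.AutomationPolicy{
	{
		Hosts:         []string{"example.com"},
		ManagementRaw: json.RawMessage(`{"module":"acme"}`),
	},
}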
@@ -140,10 +178,11 @@ func (st *ServerType) hostsFromServerBlockKeys(sb caddyfile.ServerBlock) ([]stri
// first get each unique hostname
hostMap := make(map[string]struct{})
for _, sblockKey := range sb.Keys {
- addr, err := standardizeAddress(sblockKey)
+ addr, err := ParseAddress(sblockKey)
if err != nil {
return nil, fmt.Errorf("parsing server block key: %v", err)
}
+ addr = addr.Normalize()
hostMap[addr.Host] = struct{}{}
}
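ParseAddress followed by Normalize replaces the old standardizeAddress call here and in compileEncodedMatcherSets further down. A hedged usage sketch (assuming Normalize canonicalizes the host, e.g. lowercases it — an assumption about that method):

// parse a server block key, then normalize it before using its parts
addr, err := ParseAddress("EXAMPLE.com:8443/admin")
if err != nil {
	return nil, fmt.Errorf("parsing server block key: %v", err)
}
addr = addr.Normalize()
host := addr.Host // presumably "example.com" after normalization
_ = host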
@@ -167,121 +206,75 @@ func (st *ServerType) serversFromPairings(pairings []sbAddrAssociation, warnings
}
for _, sblock := range p.serverBlocks {
- matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock)
+ matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock.block)
if err != nil {
- return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.Keys, err)
- }
-
- // extract matcher definitions
- d := caddyfile.NewDispenser("Caddyfile", sblock.Tokens["matcher"])
- matcherDefs, err := st.parseMatcherDefinitions(d)
- if err != nil {
- return nil, err
+ return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
}
+ // if there are user-defined variables, then siteVarSubroute will
+ // wrap the handlerSubroute; otherwise handlerSubroute will be the
+ // site's primary subroute.
siteVarSubroute, handlerSubroute := new(caddyhttp.Subroute), new(caddyhttp.Subroute)
- // built-in directives
+ // tls: connection policies and toggle auto HTTPS
- // root: path to root of site
- if tkns, ok := sblock.Tokens["root"]; ok {
- routes, err := st.parseRoot(tkns, matcherDefs, warnings)
- if err != nil {
- return nil, err
- }
- siteVarSubroute.Routes = append(siteVarSubroute.Routes, routes...)
+ autoHTTPSQualifiedHosts, err := st.autoHTTPSHosts(sblock)
+ if err != nil {
+ return nil, err
}
-
- // tls: off and conn policies
- if tkns, ok := sblock.Tokens["tls"]; ok {
- // get the hosts for this server block...
- hosts, err := st.hostsFromServerBlockKeys(sblock)
- if err != nil {
- return nil, err
+ if _, ok := sblock.pile["tls.off"]; ok {
+ // tls off: disable TLS (and automatic HTTPS) for server block's names
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
-
- // ...and of those, which ones qualify for auto HTTPS
- var autoHTTPSQualifiedHosts []string
- for _, h := range hosts {
- if certmagic.HostQualifies(h) {
- autoHTTPSQualifiedHosts = append(autoHTTPSQualifiedHosts, h)
- }
- }
-
- if len(tkns) == 2 && tkns[1].Text == "off" {
- // tls off: disable TLS (and automatic HTTPS) for server block's names
- if srv.AutoHTTPS == nil {
- srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
- }
- srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, autoHTTPSQualifiedHosts...)
- } else {
- // tls connection policies
- cp, err := st.parseTLSConnPolicy(caddyfile.NewDispenser("Caddyfile", tkns))
- if err != nil {
- return nil, err
- }
- // TODO: are matchers needed if every hostname of the config is matched?
- cp.Matchers = map[string]json.RawMessage{
- "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
- }
- srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
- }
- }
-
- // set up each handler directive
- for _, dirBucket := range directiveBuckets() {
- for dir := range dirBucket {
- // keep in mind that multiple occurrences of the directive may appear here
- tkns, ok := sblock.Tokens[dir]
- if !ok {
- continue
- }
-
- // extract matcher sets from matcher tokens, if any
- matcherSetsMap, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings)
-
- mod, err := caddy.GetModule("http.handlers." + dir)
- if err != nil {
- return nil, fmt.Errorf("getting handler module '%s': %v", mod.Name, err)
- }
-
- // the tokens have been divided by matcher set for us,
- // so iterate each one and set them up
- for _, mst := range matcherSetsMap {
- unm, ok := mod.New().(caddyfile.Unmarshaler)
- if !ok {
- return nil, fmt.Errorf("handler module '%s' is not a Caddyfile unmarshaler", mod.Name)
- }
- err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(d.File(), mst.tokens))
+ srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, autoHTTPSQualifiedHosts...)
+ } else if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
+ // tls connection policies
+ for _, cpVal := range cpVals {
+ cp := cpVal.Value.(*caddytls.ConnectionPolicy)
+ // only create if there is a non-empty policy
+ if !reflect.DeepEqual(cp, new(caddytls.ConnectionPolicy)) {
+ // make sure the policy covers all hostnames from the block
+ hosts, err := st.hostsFromServerBlockKeys(sblock.block)
if err != nil {
return nil, err
}
- handler, ok := unm.(caddyhttp.MiddlewareHandler)
- if !ok {
- return nil, fmt.Errorf("handler module '%s' does not implement caddyhttp.MiddlewareHandler interface", mod.Name)
- }
- route := caddyhttp.Route{
- Handle: []json.RawMessage{
- caddyconfig.JSONModuleObject(handler, "handler", dir, warnings),
- },
- }
- if mst.matcherSet != nil {
- route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet}
+ // TODO: are matchers needed if every hostname of the config is matched?
+ cp.Matchers = map[string]json.RawMessage{
+ "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
}
- handlerSubroute.Routes = append(handlerSubroute.Routes, route)
+ srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
}
-
}
+ // TODO: consolidate equal conn policies
}
- // redir: static responses that redirect
- if tkns, ok := sblock.Tokens["redir"]; ok {
- routes, err := st.parseRedir(tkns, matcherDefs, warnings)
- if err != nil {
- return nil, err
+ // vars: special routes that will have to wrap the normal handlers
+ // so that these variables can be used across their matchers too
+ for _, cfgVal := range sblock.pile["var"] {
+ siteVarSubroute.Routes = append(siteVarSubroute.Routes, cfgVal.Value.(caddyhttp.Route))
+ }
+
+ // set up each handler directive
+ dirRoutes := sblock.pile["route"]
+ // TODO: The ordering here should depend on whether a list of
+ // directives is configured: if so, sort by that list; otherwise,
+ // keep the order in which they appear in the slice (which is the
+ // order they appeared in the Caddyfile)
+ sortByList := true
+ if sortByList {
+ dirPositions := make(map[string]int)
+ for i, dir := range defaultDirectiveOrder {
+ dirPositions[dir] = i
}
- handlerSubroute.Routes = append(handlerSubroute.Routes, routes...)
+ sort.SliceStable(dirRoutes, func(i, j int) bool {
+ iDir, jDir := dirRoutes[i].directive, dirRoutes[j].directive
+ return dirPositions[iDir] < dirPositions[jDir]
+ })
+ }
+ for _, r := range dirRoutes {
+ handlerSubroute.Routes = append(handlerSubroute.Routes, r.Value.(caddyhttp.Route))
}
// the route that contains the site's handlers will
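The stable sort above orders each block's routes by the position of their originating directive in defaultDirectiveOrder; note that a directive absent from that list gets position 0 (the map lookup's zero value) and therefore sorts ahead of every listed directive. A small illustration with hypothetical directive names:

// hypothetical order list; the real defaultDirectiveOrder is defined
// elsewhere in this package
order := []string{"rewrite", "reverse_proxy", "respond"}
dirPositions := make(map[string]int)
for i, dir := range order {
	dirPositions[dir] = i
}
// routes contributed in Caddyfile order: respond, rewrite
// after sort.SliceStable with this comparison: rewrite, respond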
@@ -298,7 +291,7 @@ func (st *ServerType) serversFromPairings(pairings []sbAddrAssociation, warnings
siteSubroute.Routes = append(
siteVarSubroute.Routes,
caddyhttp.Route{
- Handle: []json.RawMessage{
+ HandlersRaw: []json.RawMessage{
caddyconfig.JSONModuleObject(subSubRoute, "handler", "subroute", warnings),
},
},
@@ -308,8 +301,8 @@ func (st *ServerType) serversFromPairings(pairings []sbAddrAssociation, warnings
siteSubroute.Routes = consolidateRoutes(siteSubroute.Routes)
srv.Routes = append(srv.Routes, caddyhttp.Route{
- MatcherSets: matcherSetsEnc,
- Handle: []json.RawMessage{
+ MatcherSetsRaw: matcherSetsEnc,
+ HandlersRaw: []json.RawMessage{
caddyconfig.JSONModuleObject(siteSubroute, "handler", "subroute", warnings),
},
})
@@ -323,16 +316,32 @@ func (st *ServerType) serversFromPairings(pairings []sbAddrAssociation, warnings
return servers, nil
}
+func (st ServerType) autoHTTPSHosts(sb serverBlock) ([]string, error) {
+ // get the hosts for this server block...
+ hosts, err := st.hostsFromServerBlockKeys(sb.block)
+ if err != nil {
+ return nil, err
+ }
+ // ...and of those, which ones qualify for auto HTTPS
+ var autoHTTPSQualifiedHosts []string
+ for _, h := range hosts {
+ if certmagic.HostQualifies(h) {
+ autoHTTPSQualifiedHosts = append(autoHTTPSQualifiedHosts, h)
+ }
+ }
+ return autoHTTPSQualifiedHosts, nil
+}
+
// consolidateRoutes combines routes with the same properties
// (same matchers, same Terminal and Group settings) for a
// cleaner overall output.
func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
for i := 0; i < len(routes)-1; i++ {
- if reflect.DeepEqual(routes[i].MatcherSets, routes[i+1].MatcherSets) &&
+ if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
routes[i].Terminal == routes[i+1].Terminal &&
routes[i].Group == routes[i+1].Group {
// keep the handlers in the same order, then splice out repetitive route
- routes[i].Handle = append(routes[i].Handle, routes[i+1].Handle...)
+ routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
routes = append(routes[:i+1], routes[i+2:]...)
i--
}
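autoHTTPSHosts narrows a block's hostnames to those certmagic considers eligible for automatic certificate management; those names feed both the automation policies and the AutoHTTPS skip list above. A hedged usage sketch — the pass/fail outcomes noted are assumptions about certmagic.HostQualifies:

hosts := []string{"example.com", "localhost", "192.168.0.1"}
var qualified []string
for _, h := range hosts {
	if certmagic.HostQualifies(h) {
		qualified = append(qualified, h)
	}
}
// qualified is expected to hold only "example.com", since localhost and
// IP literals are presumably not eligible for ACME certificates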
@@ -340,53 +349,26 @@ func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
return routes
}
-func (st *ServerType) tokensToMatcherSets(
- tkns []caddyfile.Token,
- matcherDefs map[string]map[string]json.RawMessage,
- warnings *[]caddyconfig.Warning,
-) (map[string]matcherSetAndTokens, error) {
- m := make(map[string]matcherSetAndTokens)
-
- for len(tkns) > 0 {
- d := caddyfile.NewDispenser("Caddyfile", tkns)
- d.Next() // consume directive token
-
- // look for matcher; it should be the next argument
- var matcherToken caddyfile.Token
- var matcherSet map[string]json.RawMessage
- if d.NextArg() {
- var ok bool
- var err error
- matcherSet, ok, err = st.matcherSetFromMatcherToken(d.Token(), matcherDefs, warnings)
- if err != nil {
- return nil, err
- }
- if ok {
- // found a matcher; save it, then splice it out
- // since we don't want to parse it again
- matcherToken = d.Token()
- tkns = d.Delete()
+// consolidateAutomationPolicies combines automation policies that are the same,
+// for a cleaner overall output.
+func consolidateAutomationPolicies(aps []caddytls.AutomationPolicy) []caddytls.AutomationPolicy {
+ for i := 0; i < len(aps); i++ {
+ for j := 0; j < len(aps); j++ {
+ if j == i {
+ continue
}
- d.RemainingArgs() // advance to end of line
- }
- for d.NextBlock() {
- // skip entire block including any nested blocks; all
- // we care about is accessing next directive occurrence
- for d.Nested() {
- d.NextBlock()
+ if reflect.DeepEqual(aps[i].ManagementRaw, aps[j].ManagementRaw) {
+ aps[i].Hosts = append(aps[i].Hosts, aps[j].Hosts...)
+ aps = append(aps[:j], aps[j+1:]...)
+ i--
+ break
+ }
}
- end := d.Cursor() + 1
- m[matcherToken.Text] = matcherSetAndTokens{
- matcherSet: matcherSet,
- tokens: append(m[matcherToken.Text].tokens, tkns[:end]...),
- }
- tkns = tkns[end:]
}
- return m, nil
+ return aps
}
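A quick behavior sketch for consolidateAutomationPolicies: policies whose ManagementRaw encodings are deeply equal collapse into one policy with merged hosts (the JSON literal is illustrative):

aps := []caddytls.AutomationPolicy{
	{Hosts: []string{"a.example.com"}, ManagementRaw: json.RawMessage(`{"module":"acme"}`)},
	{Hosts: []string{"b.example.com"}, ManagementRaw: json.RawMessage(`{"module":"acme"}`)},
}
aps = consolidateAutomationPolicies(aps)
// len(aps) == 1; aps[0].Hosts == ["a.example.com", "b.example.com"]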
-func (st *ServerType) matcherSetFromMatcherToken(
+func matcherSetFromMatcherToken(
tkn caddyfile.Token,
matcherDefs map[string]map[string]json.RawMessage,
warnings *[]caddyconfig.Warning,
@@ -424,10 +406,11 @@ func (st *ServerType) compileEncodedMatcherSets(sblock caddyfile.ServerBlock) ([
var matcherPairs []*hostPathPair
for _, key := range sblock.Keys {
- addr, err := standardizeAddress(key)
+ addr, err := ParseAddress(key)
if err != nil {
return nil, fmt.Errorf("server block %v: parsing and standardizing address '%s': %v", sblock.Keys, key, err)
}
+ addr = addr.Normalize()
// choose a matcher pair that should be shared by this
// server block; if none exists yet, create one
@@ -504,14 +487,6 @@ func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (map[string]
return msEncoded, nil
}
-// HandlerDirective implements a directive for an HTTP handler,
-// in that it can unmarshal its own configuration from Caddyfile
-// tokens and also specify which directive bucket it belongs in.
-type HandlerDirective interface {
- caddyfile.Unmarshaler
- Bucket() int
-}
-
// tryInt tries to convert str to an integer. If it fails, it downgrades
// the error to a warning and returns 0.
func tryInt(str string, warnings *[]caddyconfig.Warning) int {
@@ -535,7 +510,7 @@ type matcherSetAndTokens struct {
// served on those addresses.
type sbAddrAssociation struct {
addresses []string
- serverBlocks []caddyfile.ServerBlock
+ serverBlocks []serverBlock
}
// Interface guard