author    | Matthew Holt <mholt@users.noreply.github.com> | 2019-08-09 12:05:47 -0600
committer | Matthew Holt <mholt@users.noreply.github.com> | 2019-08-09 12:05:47 -0600
commit    | ab885f07b844fd60adb9d49ed7884f3cd2d939a7 (patch)
tree      | 8827ad88cf3da8982154e2fda46f53274342785d /caddyconfig
parent    | 4950ce485f7d931890fcfd2ee287b6df1b5db435 (diff)
Implement config adapters and beginning of Caddyfile adapter
Along with several other changes, such as renaming caddyhttp.ServerRoute
to caddyhttp.Route, exporting some types that were not exported before,
and tweaking the caddytls TLS values to be more consistent.
Notably, we also now disable automatic cert management for names which
already have a cert (manually) loaded into the cache. These names no
longer need to be specified in the "skip_certificates" field of the
automatic HTTPS config, because they will be skipped automatically.
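For orientation, a config adapter introduced by this commit is invoked roughly like the following. This is a minimal sketch; the httpcaddyfile.ServerType value and its import path are assumptions inferred from the files added below, not something stated in the commit message.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
	body, err := ioutil.ReadFile("Caddyfile")
	if err != nil {
		log.Fatal(err)
	}

	// Assumption: httpcaddyfile exposes a ServerType that satisfies caddyfile.ServerType.
	adapter := caddyfile.Adapter{ServerType: httpcaddyfile.ServerType{}}

	// "filename" and "pretty" are the options read by Adapt in adapter.go below.
	jsonCfg, warnings, err := adapter.Adapt(body, map[string]string{
		"filename": "Caddyfile",
		"pretty":   "true",
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range warnings {
		log.Printf("[WARNING] %+v", w)
	}
	fmt.Println(string(jsonCfg))
}
```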
Diffstat (limited to 'caddyconfig')
-rw-r--r-- | caddyconfig/caddyfile/adapter.go                | 93
-rwxr-xr-x | caddyconfig/caddyfile/dispenser.go              | 333
-rwxr-xr-x | caddyconfig/caddyfile/dispenser_test.go         | 316
-rwxr-xr-x | caddyconfig/caddyfile/lexer.go                  | 150
-rwxr-xr-x | caddyconfig/caddyfile/lexer_test.go             | 196
-rwxr-xr-x | caddyconfig/caddyfile/parse.go                  | 492
-rwxr-xr-x | caddyconfig/caddyfile/parse_test.go             | 718
-rwxr-xr-x | caddyconfig/caddyfile/testdata/import_glob0.txt | 6
-rwxr-xr-x | caddyconfig/caddyfile/testdata/import_glob1.txt | 4
-rwxr-xr-x | caddyconfig/caddyfile/testdata/import_glob2.txt | 3
-rwxr-xr-x | caddyconfig/caddyfile/testdata/import_test1.txt | 2
-rwxr-xr-x | caddyconfig/caddyfile/testdata/import_test2.txt | 4
-rw-r--r-- | caddyconfig/configadapters.go                   | 113
-rw-r--r-- | caddyconfig/httpcaddyfile/addresses.go          | 350
-rw-r--r-- | caddyconfig/httpcaddyfile/addresses_test.go     | 129
-rw-r--r-- | caddyconfig/httpcaddyfile/builtins.go            | 257
-rw-r--r-- | caddyconfig/httpcaddyfile/handlers.go            | 92
-rw-r--r-- | caddyconfig/httpcaddyfile/httptype.go            | 542
18 files changed, 3800 insertions, 0 deletions
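The pipeline these files implement is: lex the Caddyfile into tokens, group the tokens into server blocks keyed by directive, then hand the blocks to the server type, which builds a caddy.Config that is finally marshaled to JSON. Below is a minimal sketch of calling the parser on its own, based on the Parse signature added in parse.go; passing nil for validDirectives skips directive validation, as that file documents. The example input is illustrative.

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func main() {
	input := `localhost:2015 {
	gzip
	log stdout
}`

	// nil validDirectives disables the directive whitelist check
	blocks, err := caddyfile.Parse("Caddyfile", strings.NewReader(input), nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range blocks {
		fmt.Println("keys:", b.Keys) // e.g. [localhost:2015]
		for dir, tokens := range b.Tokens {
			fmt.Printf("directive %q has %d token(s)\n", dir, len(tokens))
		}
	}
}
```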
diff --git a/caddyconfig/caddyfile/adapter.go b/caddyconfig/caddyfile/adapter.go new file mode 100644 index 0000000..ab4905a --- /dev/null +++ b/caddyconfig/caddyfile/adapter.go @@ -0,0 +1,93 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package caddyfile + +import ( + "bytes" + "encoding/json" + "fmt" + + "github.com/caddyserver/caddy/v2" + "github.com/caddyserver/caddy/v2/caddyconfig" +) + +// Adapter adapts Caddyfile to Caddy JSON. +type Adapter struct { + ServerType ServerType +} + +// Adapt converts the Caddyfile config in body to Caddy JSON. +func (a Adapter) Adapt(body []byte, options map[string]string) ([]byte, []caddyconfig.Warning, error) { + if a.ServerType == nil { + return nil, nil, fmt.Errorf("no server type") + } + if options == nil { + options = make(map[string]string) + } + + directives := a.ServerType.ValidDirectives() + + filename := options["filename"] + if filename == "" { + filename = "Caddyfile" + } + + serverBlocks, err := Parse(filename, bytes.NewReader(body), directives) + if err != nil { + return nil, nil, err + } + + cfg, warnings, err := a.ServerType.Setup(serverBlocks, options) + if err != nil { + return nil, warnings, err + } + + marshalFunc := json.Marshal + if options["pretty"] == "true" { + marshalFunc = caddyconfig.JSONIndent + } + result, err := marshalFunc(cfg) + + return result, warnings, err +} + +// Unmarshaler is a type that can unmarshal +// Caddyfile tokens to set itself up for a +// JSON encoding. The goal of an unmarshaler +// is not to set itself up for actual use, +// but to set itself up for being marshaled +// into JSON. Caddyfile-unmarshaled values +// will not be used directly; they will be +// encoded as JSON and then used from that. +type Unmarshaler interface { + UnmarshalCaddyfile(d *Dispenser) error +} + +// ServerType is a type that can evaluate a Caddyfile and set up a caddy config. +type ServerType interface { + // ValidDirectives returns a list of the + // server type's recognized directives. + ValidDirectives() []string + + // Setup takes the server blocks which + // contain tokens, as well as options + // (e.g. CLI flags) and creates a Caddy + // config, along with any warnings or + // an error. + Setup([]ServerBlock, map[string]string) (*caddy.Config, []caddyconfig.Warning, error) +} + +// Interface guard +var _ caddyconfig.Adapter = (*Adapter)(nil) diff --git a/caddyconfig/caddyfile/dispenser.go b/caddyconfig/caddyfile/dispenser.go new file mode 100755 index 0000000..1cf5d04 --- /dev/null +++ b/caddyconfig/caddyfile/dispenser.go @@ -0,0 +1,333 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package caddyfile + +import ( + "errors" + "fmt" + "strings" +) + +// Dispenser is a type that dispenses tokens, similarly to a lexer, +// except that it can do so with some notion of structure. An empty +// Dispenser is invalid; call NewDispenser to make a proper instance. +type Dispenser struct { + filename string + tokens []Token + cursor int + nesting int +} + +// NewDispenser returns a Dispenser filled with the given tokens. +func NewDispenser(filename string, tokens []Token) *Dispenser { + return &Dispenser{ + filename: filename, + tokens: tokens, + cursor: -1, + } +} + +// Next loads the next token. Returns true if a token +// was loaded; false otherwise. If false, all tokens +// have been consumed. +func (d *Dispenser) Next() bool { + if d.cursor < len(d.tokens)-1 { + d.cursor++ + return true + } + return false +} + +// Prev moves to the previous token. It does the inverse +// of Next(). Generally, this should only be used in +// special cases such as deleting a token from the slice +// that d is iterating. In that case, without using Prev(), +// the dispenser would be pointing at the wrong token since +// deleting a token implicitly advances the cursor. +func (d *Dispenser) Prev() bool { + if d.cursor > 0 { + d.cursor-- + return true + } + return false +} + +// NextArg loads the next token if it is on the same +// line and if it is not a block opening (open curly +// brace). Returns true if an argument token was +// loaded; false otherwise. If false, all tokens on +// the line have been consumed except for potentially +// a block opening. It handles imported tokens +// correctly. +func (d *Dispenser) NextArg() bool { + if !d.nextOnSameLine() { + return false + } + if d.Val() == "{" { + // roll back; a block opening is not an argument + d.cursor-- + return false + } + return true +} + +// nextOnSameLine advances the cursor if the next +// token is on the same line of the same file. +func (d *Dispenser) nextOnSameLine() bool { + if d.cursor < 0 { + d.cursor++ + return true + } + if d.cursor >= len(d.tokens) { + return false + } + if d.cursor < len(d.tokens)-1 && + d.tokens[d.cursor].File == d.tokens[d.cursor+1].File && + d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line { + d.cursor++ + return true + } + return false +} + +// NextLine loads the next token only if it is not on the same +// line as the current token, and returns true if a token was +// loaded; false otherwise. If false, there is not another token +// or it is on the same line. It handles imported tokens correctly. +func (d *Dispenser) NextLine() bool { + if d.cursor < 0 { + d.cursor++ + return true + } + if d.cursor >= len(d.tokens) { + return false + } + if d.cursor < len(d.tokens)-1 && + (d.tokens[d.cursor].File != d.tokens[d.cursor+1].File || + d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) { + d.cursor++ + return true + } + return false +} + +// NextBlock can be used as the condition of a for loop +// to load the next token as long as it opens a block or +// is already in a block. 
It returns true if a token was +// loaded, or false when the block's closing curly brace +// was loaded and thus the block ended. Nested blocks are +// not supported. +func (d *Dispenser) NextBlock() bool { + if d.nesting > 0 { + d.Next() + if d.Val() == "}" { + d.nesting-- + return false + } + return true + } + if !d.nextOnSameLine() { // block must open on same line + return false + } + if d.Val() != "{" { + d.cursor-- // roll back if not opening brace + return false + } + d.Next() + if d.Val() == "}" { + // open and then closed right away + return false + } + d.nesting++ + return true +} + +// Nested returns true if the token is currently nested +// inside a block (i.e. an open curly brace was consumed). +func (d *Dispenser) Nested() bool { + return d.nesting > 0 +} + +// Val gets the text of the current token. If there is no token +// loaded, it returns empty string. +func (d *Dispenser) Val() string { + if d.cursor < 0 || d.cursor >= len(d.tokens) { + return "" + } + return d.tokens[d.cursor].Text +} + +// Line gets the line number of the current token. If there is no token +// loaded, it returns 0. +func (d *Dispenser) Line() int { + if d.cursor < 0 || d.cursor >= len(d.tokens) { + return 0 + } + return d.tokens[d.cursor].Line +} + +// File gets the filename of the current token. If there is no token loaded, +// it returns the filename originally given when parsing started. +func (d *Dispenser) File() string { + if d.cursor < 0 || d.cursor >= len(d.tokens) { + return d.filename + } + if tokenFilename := d.tokens[d.cursor].File; tokenFilename != "" { + return tokenFilename + } + return d.filename +} + +// Args is a convenience function that loads the next arguments +// (tokens on the same line) into an arbitrary number of strings +// pointed to in targets. If there are fewer tokens available +// than string pointers, the remaining strings will not be changed +// and false will be returned. If there were enough tokens available +// to fill the arguments, then true will be returned. +func (d *Dispenser) Args(targets ...*string) bool { + for i := 0; i < len(targets); i++ { + if !d.NextArg() { + return false + } + *targets[i] = d.Val() + } + return true +} + +// RemainingArgs loads any more arguments (tokens on the same line) +// into a slice and returns them. Open curly brace tokens also indicate +// the end of arguments, and the curly brace is not included in +// the return value nor is it loaded. +func (d *Dispenser) RemainingArgs() []string { + var args []string + for d.NextArg() { + args = append(args, d.Val()) + } + return args +} + +// NewFromNextTokens returns a new dispenser with a copy of +// the tokens from the current token until the end of the +// "directive" whether that be to the end of the line or +// the end of a block that starts at the end of the line. +func (d *Dispenser) NewFromNextTokens() *Dispenser { + var tkns []Token + tkns = append(tkns, d.Token()) + for d.NextArg() { + tkns = append(tkns, d.Token()) + } + if d.Next() && d.Val() == "{" { + tkns = append(tkns, d.Token()) + for d.NextBlock() { + for d.Nested() { + tkns = append(tkns, d.Token()) + d.NextBlock() + } + } + tkns = append(tkns, d.Token()) + } else { + d.cursor-- + } + return NewDispenser(d.filename, tkns) +} + +// Token returns the current token. +func (d *Dispenser) Token() Token { + if d.cursor < 0 || d.cursor >= len(d.tokens) { + return Token{} + } + return d.tokens[d.cursor] +} + +// Cursor returns the current cursor (token index). 
+func (d *Dispenser) Cursor() int { + return d.cursor +} + +// ArgErr returns an argument error, meaning that another +// argument was expected but not found. In other words, +// a line break or open curly brace was encountered instead of +// an argument. +func (d *Dispenser) ArgErr() error { + if d.Val() == "{" { + return d.Err("Unexpected token '{', expecting argument") + } + return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val()) +} + +// SyntaxErr creates a generic syntax error which explains what was +// found and what was expected. +func (d *Dispenser) SyntaxErr(expected string) error { + msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected) + return errors.New(msg) +} + +// EOFErr returns an error indicating that the dispenser reached +// the end of the input when searching for the next token. +func (d *Dispenser) EOFErr() error { + return d.Errf("Unexpected EOF") +} + +// Err generates a custom parse-time error with a message of msg. +func (d *Dispenser) Err(msg string) error { + msg = fmt.Sprintf("%s:%d - Error during parsing: %s", d.File(), d.Line(), msg) + return errors.New(msg) +} + +// Errf is like Err, but for formatted error messages +func (d *Dispenser) Errf(format string, args ...interface{}) error { + return d.Err(fmt.Sprintf(format, args...)) +} + +// Delete deletes the current token and returns the updated slice +// of tokens. The cursor is not advanced to the next token. +// Because deletion modifies the underlying slice, this method +// should only be called if you have access to the original slice +// of tokens and/or are using the slice of tokens outside this +// Dispenser instance. If you do not re-assign the slice with the +// return value of this method, inconsistencies in the token +// array will become apparent (or worse, hide from you like they +// did me for 3 and a half freaking hours late one night). +func (d *Dispenser) Delete() []Token { + if d.cursor >= 0 && d.cursor < len(d.tokens)-1 { + d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...) + d.cursor-- + } + return d.tokens +} + +// numLineBreaks counts how many line breaks are in the token +// value given by the token index tknIdx. It returns 0 if the +// token does not exist or there are no line breaks. +func (d *Dispenser) numLineBreaks(tknIdx int) int { + if tknIdx < 0 || tknIdx >= len(d.tokens) { + return 0 + } + return strings.Count(d.tokens[tknIdx].Text, "\n") +} + +// isNewLine determines whether the current token is on a different +// line (higher line number) than the previous token. It handles imported +// tokens correctly. If there isn't a previous token, it returns true. +func (d *Dispenser) isNewLine() bool { + if d.cursor < 1 { + return true + } + if d.cursor > len(d.tokens)-1 { + return false + } + return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File || + d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line +} diff --git a/caddyconfig/caddyfile/dispenser_test.go b/caddyconfig/caddyfile/dispenser_test.go new file mode 100755 index 0000000..9860bed --- /dev/null +++ b/caddyconfig/caddyfile/dispenser_test.go @@ -0,0 +1,316 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package caddyfile + +import ( + "io" + "log" + "reflect" + "strings" + "testing" +) + +func TestDispenser_Val_Next(t *testing.T) { + input := `host:port + dir1 arg1 + dir2 arg2 arg3 + dir3` + d := newTestDispenser(input) + + if val := d.Val(); val != "" { + t.Fatalf("Val(): Should return empty string when no token loaded; got '%s'", val) + } + + assertNext := func(shouldLoad bool, expectedCursor int, expectedVal string) { + if loaded := d.Next(); loaded != shouldLoad { + t.Errorf("Next(): Expected %v but got %v instead (val '%s')", shouldLoad, loaded, d.Val()) + } + if d.cursor != expectedCursor { + t.Errorf("Expected cursor to be %d, but was %d", expectedCursor, d.cursor) + } + if d.nesting != 0 { + t.Errorf("Nesting should be 0, was %d instead", d.nesting) + } + if val := d.Val(); val != expectedVal { + t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) + } + } + + assertNext(true, 0, "host:port") + assertNext(true, 1, "dir1") + assertNext(true, 2, "arg1") + assertNext(true, 3, "dir2") + assertNext(true, 4, "arg2") + assertNext(true, 5, "arg3") + assertNext(true, 6, "dir3") + // Note: This next test simply asserts existing behavior. + // If desired, we may wish to empty the token value after + // reading past the EOF. Open an issue if you want this change. + assertNext(false, 6, "dir3") +} + +func TestDispenser_NextArg(t *testing.T) { + input := `dir1 arg1 + dir2 arg2 arg3 + dir3` + d := newTestDispenser(input) + + assertNext := func(shouldLoad bool, expectedVal string, expectedCursor int) { + if d.Next() != shouldLoad { + t.Errorf("Next(): Should load token but got false instead (val: '%s')", d.Val()) + } + if d.cursor != expectedCursor { + t.Errorf("Next(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor) + } + if val := d.Val(); val != expectedVal { + t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) + } + } + + assertNextArg := func(expectedVal string, loadAnother bool, expectedCursor int) { + if !d.NextArg() { + t.Error("NextArg(): Should load next argument but got false instead") + } + if d.cursor != expectedCursor { + t.Errorf("NextArg(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor) + } + if val := d.Val(); val != expectedVal { + t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) + } + if !loadAnother { + if d.NextArg() { + t.Fatalf("NextArg(): Should NOT load another argument, but got true instead (val: '%s')", d.Val()) + } + if d.cursor != expectedCursor { + t.Errorf("NextArg(): Expected cursor to remain at %d, but it was %d", expectedCursor, d.cursor) + } + } + } + + assertNext(true, "dir1", 0) + assertNextArg("arg1", false, 1) + assertNext(true, "dir2", 2) + assertNextArg("arg2", true, 3) + assertNextArg("arg3", false, 4) + assertNext(true, "dir3", 5) + assertNext(false, "dir3", 5) +} + +func TestDispenser_NextLine(t *testing.T) { + input := `host:port + dir1 arg1 + dir2 arg2 arg3` + d := newTestDispenser(input) + + assertNextLine := func(shouldLoad bool, expectedVal string, expectedCursor int) { + if d.NextLine() != shouldLoad { + t.Errorf("NextLine(): Should load token 
but got false instead (val: '%s')", d.Val()) + } + if d.cursor != expectedCursor { + t.Errorf("NextLine(): Expected cursor to be %d, instead was %d", expectedCursor, d.cursor) + } + if val := d.Val(); val != expectedVal { + t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val) + } + } + + assertNextLine(true, "host:port", 0) + assertNextLine(true, "dir1", 1) + assertNextLine(false, "dir1", 1) + d.Next() // arg1 + assertNextLine(true, "dir2", 3) + assertNextLine(false, "dir2", 3) + d.Next() // arg2 + assertNextLine(false, "arg2", 4) + d.Next() // arg3 + assertNextLine(false, "arg3", 5) +} + +func TestDispenser_NextBlock(t *testing.T) { + input := `foobar1 { + sub1 arg1 + sub2 + } + foobar2 { + }` + d := newTestDispenser(input) + + assertNextBlock := func(shouldLoad bool, expectedCursor, expectedNesting int) { + if loaded := d.NextBlock(); loaded != shouldLoad { + t.Errorf("NextBlock(): Should return %v but got %v", shouldLoad, loaded) + } + if d.cursor != expectedCursor { + t.Errorf("NextBlock(): Expected cursor to be %d, was %d", expectedCursor, d.cursor) + } + if d.nesting != expectedNesting { + t.Errorf("NextBlock(): Nesting should be %d, not %d", expectedNesting, d.nesting) + } + } + + assertNextBlock(false, -1, 0) + d.Next() // foobar1 + assertNextBlock(true, 2, 1) + assertNextBlock(true, 3, 1) + assertNextBlock(true, 4, 1) + assertNextBlock(false, 5, 0) + d.Next() // foobar2 + assertNextBlock(false, 8, 0) // empty block is as if it didn't exist +} + +func TestDispenser_Args(t *testing.T) { + var s1, s2, s3 string + input := `dir1 arg1 arg2 arg3 + dir2 arg4 arg5 + dir3 arg6 arg7 + dir4` + d := newTestDispenser(input) + + d.Next() // dir1 + + // As many strings as arguments + if all := d.Args(&s1, &s2, &s3); !all { + t.Error("Args(): Expected true, got false") + } + if s1 != "arg1" { + t.Errorf("Args(): Expected s1 to be 'arg1', got '%s'", s1) + } + if s2 != "arg2" { + t.Errorf("Args(): Expected s2 to be 'arg2', got '%s'", s2) + } + if s3 != "arg3" { + t.Errorf("Args(): Expected s3 to be 'arg3', got '%s'", s3) + } + + d.Next() // dir2 + + // More strings than arguments + if all := d.Args(&s1, &s2, &s3); all { + t.Error("Args(): Expected false, got true") + } + if s1 != "arg4" { + t.Errorf("Args(): Expected s1 to be 'arg4', got '%s'", s1) + } + if s2 != "arg5" { + t.Errorf("Args(): Expected s2 to be 'arg5', got '%s'", s2) + } + if s3 != "arg3" { + t.Errorf("Args(): Expected s3 to be unchanged ('arg3'), instead got '%s'", s3) + } + + // (quick cursor check just for kicks and giggles) + if d.cursor != 6 { + t.Errorf("Cursor should be 6, but is %d", d.cursor) + } + + d.Next() // dir3 + + // More arguments than strings + if all := d.Args(&s1); !all { + t.Error("Args(): Expected true, got false") + } + if s1 != "arg6" { + t.Errorf("Args(): Expected s1 to be 'arg6', got '%s'", s1) + } + + d.Next() // dir4 + + // No arguments or strings + if all := d.Args(); !all { + t.Error("Args(): Expected true, got false") + } + + // No arguments but at least one string + if all := d.Args(&s1); all { + t.Error("Args(): Expected false, got true") + } +} + +func TestDispenser_RemainingArgs(t *testing.T) { + input := `dir1 arg1 arg2 arg3 + dir2 arg4 arg5 + dir3 arg6 { arg7 + dir4` + d := newTestDispenser(input) + + d.Next() // dir1 + + args := d.RemainingArgs() + if expected := []string{"arg1", "arg2", "arg3"}; !reflect.DeepEqual(args, expected) { + t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args) + } + + d.Next() // dir2 + + args = d.RemainingArgs() + if expected := []string{"arg4", 
"arg5"}; !reflect.DeepEqual(args, expected) { + t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args) + } + + d.Next() // dir3 + + args = d.RemainingArgs() + if expected := []string{"arg6"}; !reflect.DeepEqual(args, expected) { + t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args) + } + + d.Next() // { + d.Next() // arg7 + d.Next() // dir4 + + args = d.RemainingArgs() + if len(args) != 0 { + t.Errorf("RemainingArgs(): Expected %v, got %v", []string{}, args) + } +} + +func TestDispenser_ArgErr_Err(t *testing.T) { + input := `dir1 { + } + dir2 arg1 arg2` + d := newTestDispenser(input) + + d.cursor = 1 // { + + if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "{") { + t.Errorf("ArgErr(): Expected an error message with { in it, but got '%v'", err) + } + + d.cursor = 5 // arg2 + + if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "arg2") { + t.Errorf("ArgErr(): Expected an error message with 'arg2' in it; got '%v'", err) + } + + err := d.Err("foobar") + if err == nil { + t.Fatalf("Err(): Expected an error, got nil") + } + + if !strings.Contains(err.Error(), "Testfile:3") { + t.Errorf("Expected error message with filename:line in it; got '%v'", err) + } + + if !strings.Contains(err.Error(), "foobar") { + t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err) + } +} + +func newTestDispenser(input string) *Dispenser { + tokens, err := allTokens(strings.NewReader(input)) + if err != nil && err != io.EOF { + log.Fatalf("getting all tokens from input: %v", err) + } + return NewDispenser("Testfile", tokens) +} diff --git a/caddyconfig/caddyfile/lexer.go b/caddyconfig/caddyfile/lexer.go new file mode 100755 index 0000000..efe648d --- /dev/null +++ b/caddyconfig/caddyfile/lexer.go @@ -0,0 +1,150 @@ +// Copyright 2015 Light Code Labs, LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package caddyfile + +import ( + "bufio" + "io" + "unicode" +) + +type ( + // lexer is a utility which can get values, token by + // token, from a Reader. A token is a word, and tokens + // are separated by whitespace. A word can be enclosed + // in quotes if it contains whitespace. + lexer struct { + reader *bufio.Reader + token Token + line int + } + + // Token represents a single parsable unit. + Token struct { + File string + Line int + Text string + } +) + +// load prepares the lexer to scan an input for tokens. +// It discards any leading byte order mark. +func (l *lexer) load(input io.Reader) error { + l.reader = bufio.NewReader(input) + l.line = 1 + + // discard byte order mark, if present + firstCh, _, err := l.reader.ReadRune() + if err != nil { + return err + } + if firstCh != 0xFEFF { + err := l.reader.UnreadRune() + if err != nil { + return err + } + } + + return nil +} + +// next loads the next token into the lexer. 
+// A token is delimited by whitespace, unless +// the token starts with a quotes character (") +// in which case the token goes until the closing +// quotes (the enclosing quotes are not included). +// Inside quoted strings, quotes may be escaped +// with a preceding \ character. No other chars +// may be escaped. The rest of the line is skipped +// if a "#" character is read in. Returns true if +// a token was loaded; false otherwise. +func (l *lexer) next() bool { + var val []rune + var comment, quoted, escaped bool + + makeToken := func() bool { + l.token.Text = string(val) + return true + } + + for { + ch, _, err := l.reader.ReadRune() + if err != nil { + if len(val) > 0 { + return makeToken() + } + if err == io.EOF { + return false + } + panic(err) + } + + if quoted { + if !escaped { + if ch == '\\' { + escaped = true + continue + } else if ch == '"' { + quoted = false + return makeToken() + } + } + if ch == '\n' { + l.line++ + } + if escaped { + // only escape quotes and newlines + if ch != '"' && ch != '\n' { + val = append(val, '\\') + } + } + val = append(val, ch) + escaped = false + continue + } + + if unicode.IsSpace(ch) { + if ch == '\r' { + continue + } + if ch == '\n' { + l.line++ + comment = false + } + if len(val) > 0 { + return makeToken() + } + continue + } + + if ch == '#' { + comment = true + } + + if comment { + continue + } + + if len(val) == 0 { + l.token = Token{Line: l.line} + if ch == '"' { + quoted = true + continue + } + } + + val = append(val, ch) + } +} diff --git a/caddyconfig/caddyfile/lexer_test.go b/caddyconfig/caddyfile/lexer_test.go new file mode 100755 index 0000000..f9a843c --- /dev/null +++ b/caddyconfig/caddyfile/lexer_test.go @@ -0,0 +1,196 @@ +// Copyright 2015 Light Code Labs, LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package caddyfile + +import ( + "log" + "strings" + "testing" +) + +type lexerTestCase struct { + input string + expected []Token +} + +func TestLexer(t *testing.T) { + testCases := []lexerTestCase{ + { + input: `host:123`, + expected: []Token{ + {Line: 1, Text: "host:123"}, + }, + }, + { + input: `host:123 + + directive`, + expected: []Token{ + {Line: 1, Text: "host:123"}, + {Line: 3, Text: "directive"}, + }, + }, + { + input: `host:123 { + directive + }`, + expected: []Token{ + {Line: 1, Text: "host:123"}, + {Line: 1, Text: "{"}, + {Line: 2, Text: "directive"}, + {Line: 3, Text: "}"}, + }, + }, + { + input: `host:123 { directive }`, + expected: []Token{ + {Line: 1, Text: "host:123"}, + {Line: 1, Text: "{"}, + {Line: 1, Text: "directive"}, + {Line: 1, Text: "}"}, + }, + }, + { + input: `host:123 { + #comment + directive + # comment + foobar # another comment + }`, + expected: []Token{ + {Line: 1, Text: "host:123"}, + {Line: 1, Text: "{"}, + {Line: 3, Text: "directive"}, + {Line: 5, Text: "foobar"}, + {Line: 6, Text: "}"}, + }, + }, + { + input: `a "quoted value" b + foobar`, + expected: []Token{ + {Line: 1, Text: "a"}, + {Line: 1, Text: "quoted value"}, + {Line: 1, Text: "b"}, + {Line: 2, Text: "foobar"}, + }, + }, + { + input: `A "quoted \"value\" inside" B`, + expected: []Token{ + {Line: 1, Text: "A"}, + {Line: 1, Text: `quoted "value" inside`}, + {Line: 1, Text: "B"}, + }, + }, + { + input: "A \"newline \\\ninside\" quotes", + expected: []Token{ + {Line: 1, Text: "A"}, + {Line: 1, Text: "newline \ninside"}, + {Line: 2, Text: "quotes"}, + }, + }, + { + input: `"don't\escape"`, + expected: []Token{ + {Line: 1, Text: `don't\escape`}, + }, + }, + { + input: `"don't\\escape"`, + expected: []Token{ + {Line: 1, Text: `don't\\escape`}, + }, + }, + { + input: `A "quoted value with line + break inside" { + foobar + }`, + expected: []Token{ + {Line: 1, Text: "A"}, + {Line: 1, Text: "quoted value with line\n\t\t\t\t\tbreak inside"}, + {Line: 2, Text: "{"}, + {Line: 3, Text: "foobar"}, + {Line: 4, Text: "}"}, + }, + }, + { + input: `"C:\php\php-cgi.exe"`, + expected: []Token{ + {Line: 1, Text: `C:\php\php-cgi.exe`}, + }, + }, + { + input: `empty "" string`, + expected: []Token{ + {Line: 1, Text: `empty`}, + {Line: 1, Text: ``}, + {Line: 1, Text: `string`}, + }, + }, + { + input: "skip those\r\nCR characters", + expected: []Token{ + {Line: 1, Text: "skip"}, + {Line: 1, Text: "those"}, + {Line: 2, Text: "CR"}, + {Line: 2, Text: "characters"}, + }, + }, + { + input: "\xEF\xBB\xBF:8080", // test with leading byte order mark + expected: []Token{ + {Line: 1, Text: ":8080"}, + }, + }, + } + + for i, testCase := range testCases { + actual := tokenize(testCase.input) + lexerCompare(t, i, testCase.expected, actual) + } +} + +func tokenize(input string) (tokens []Token) { + l := lexer{} + if err := l.load(strings.NewReader(input)); err != nil { + log.Printf("[ERROR] load failed: %v", err) + } + for l.next() { + tokens = append(tokens, l.token) + } + return +} + +func lexerCompare(t *testing.T, n int, expected, actual []Token) { + if len(expected) != len(actual) { + t.Errorf("Test case %d: expected %d token(s) but got %d", n, len(expected), len(actual)) + } + + for i := 0; i < len(actual) && i < len(expected); i++ { + if actual[i].Line != expected[i].Line { + t.Errorf("Test case %d token %d ('%s'): expected line %d but was line %d", + n, i, expected[i].Text, expected[i].Line, actual[i].Line) + break + } + if actual[i].Text != expected[i].Text { + t.Errorf("Test case %d token %d: expected text '%s' but was 
'%s'", + n, i, expected[i].Text, actual[i].Text) + break + } + } +} diff --git a/caddyconfig/caddyfile/parse.go b/caddyconfig/caddyfile/parse.go new file mode 100755 index 0000000..cc91e3d --- /dev/null +++ b/caddyconfig/caddyfile/parse.go @@ -0,0 +1,492 @@ +// Copyright 2015 Light Code Labs, LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package caddyfile + +import ( + "io" + "log" + "os" + "path/filepath" + "strings" +) + +// Parse parses the input just enough to group tokens, in +// order, by server block. No further parsing is performed. +// Server blocks are returned in the order in which they appear. +// Directives that do not appear in validDirectives will cause +// an error. If you do not want to check for valid directives, +// pass in nil instead. +func Parse(filename string, input io.Reader, validDirectives []string) ([]ServerBlock, error) { + tokens, err := allTokens(input) + if err != nil { + return nil, err + } + p := parser{Dispenser: NewDispenser(filename, tokens), validDirectives: validDirectives} + return p.parseAll() +} + +// allTokens lexes the entire input, but does not parse it. +// It returns all the tokens from the input, unstructured +// and in order. +func allTokens(input io.Reader) ([]Token, error) { + l := new(lexer) + err := l.load(input) + if err != nil { + return nil, err + } + var tokens []Token + for l.next() { + tokens = append(tokens, l.token) + } + return tokens, nil +} + +type parser struct { + *Dispenser + block ServerBlock // current server block being parsed + validDirectives []string // a directive must be valid or it's an error + eof bool // if we encounter a valid EOF in a hard place + definedSnippets map[string][]Token +} + +func (p *parser) parseAll() ([]ServerBlock, error) { + var blocks []ServerBlock + + for p.Next() { + err := p.parseOne() + if err != nil { + return blocks, err + } + if len(p.block.Keys) > 0 { + blocks = append(blocks, p.block) + } + } + + return blocks, nil +} + +func (p *parser) parseOne() error { + p.block = ServerBlock{Tokens: make(map[string][]Token)} + + return p.begin() +} + +func (p *parser) begin() error { + if len(p.tokens) == 0 { + return nil + } + + err := p.addresses() + + if err != nil { + return err + } + + if p.eof { + // this happens if the Caddyfile consists of only + // a line of addresses and nothing else + return nil + } + + if ok, name := p.isSnippet(); ok { + if p.definedSnippets == nil { + p.definedSnippets = map[string][]Token{} + } + if _, found := p.definedSnippets[name]; found { + return p.Errf("redeclaration of previously declared snippet %s", name) + } + // consume all tokens til matched close brace + tokens, err := p.snippetTokens() + if err != nil { + return err + } + p.definedSnippets[name] = tokens + // empty block keys so we don't save this block as a real server. 
+ p.block.Keys = nil + return nil + } + + return p.blockContents() +} + +func (p *parser) addresses() error { + var expectingAnother bool + + for { + tkn := replaceEnvVars(p.Val()) + + // special case: import directive replaces tokens during parse-time + if tkn == "import" && p.isNewLine() { + err := p.doImport() + if err != nil { + return err + } + continue + } + + // Open brace definitely indicates end of addresses + if tkn == "{" { + if expectingAnother { + return p.Errf("Expected another address but had '%s' - check for extra comma", tkn) + } + break + } + + if tkn != "" { // empty token possible if user typed "" + // Trailing comma indicates another address will follow, which + // may possibly be on the next line + if tkn[len(tkn)-1] == ',' { + tkn = tkn[:len(tkn)-1] + expectingAnother = true + } else { + expectingAnother = false // but we may still see another one on this line + } + + p.block.Keys = append(p.block.Keys, tkn) + } + + // Advance token and possibly break out of loop or return error + hasNext := p.Next() + if expectingAnother && !hasNext { + return p.EOFErr() + } + if !hasNext { + p.eof = true + break // EOF + } + if !expectingAnother && p.isNewLine() { + break + } + } + + return nil +} + +func (p *parser) blockContents() error { + errOpenCurlyBrace := p.openCurlyBrace() + if errOpenCurlyBrace != nil { + // single-server configs don't need curly braces + p.cursor-- + } + + err := p.directives() + if err != nil { + return err + } + + // Only look for close curly brace if there was an opening + if errOpenCurlyBrace == nil { + err = p.closeCurlyBrace() + if err != nil { + return err + } + } + + return nil +} + +// directives parses through all the lines for directives +// and it expects the next token to be the first +// directive. It goes until EOF or closing curly brace +// which ends the server block. +func (p *parser) directives() error { + for p.Next() { + // end of server block + if p.Val() == "}" { + break + } + + // special case: import directive replaces tokens during parse-time + if p.Val() == "import" { + err := p.doImport() + if err != nil { + return err + } + p.cursor-- // cursor is advanced when we continue, so roll back one more + continue + } + + // normal case: parse a directive on this line + if err := p.directive(); err != nil { + return err + } + } + return nil +} + +// doImport swaps out the import directive and its argument +// (a total of 2 tokens) with the tokens in the specified file +// or globbing pattern. When the function returns, the cursor +// is on the token before where the import directive was. In +// other words, call Next() to access the first token that was +// imported. +func (p *parser) doImport() error { + // syntax checks + if !p.NextArg() { + return p.ArgErr() + } + importPattern := replaceEnvVars(p.Val()) + if importPattern == "" { + return p.Err("Import requires a non-empty filepath") + } + if p.NextArg() { + return p.Err("Import takes only one argument (glob pattern or file)") + } + // splice out the import directive and its argument (2 tokens total) + tokensBefore := p.tokens[:p.cursor-1] + tokensAfter := p.tokens[p.cursor+1:] + var importedTokens []Token + + // first check snippets. 
That is a simple, non-recursive replacement + if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil { + importedTokens = p.definedSnippets[importPattern] + } else { + // make path relative to the file of the _token_ being processed rather + // than current working directory (issue #867) and then use glob to get + // list of matching filenames + absFile, err := filepath.Abs(p.Dispenser.File()) + if err != nil { + return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err) + } + + var matches []string + var globPattern string + if !filepath.IsAbs(importPattern) { + globPattern = filepath.Join(filepath.Dir(absFile), importPattern) + } else { + globPattern = importPattern + } + if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 || + (strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) { + // See issue #2096 - a pattern with many glob expansions can hang for too long + return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern) + } + matches, err = filepath.Glob(globPattern) + + if err != nil { + return p.Errf("Failed to use import pattern %s: %v", importPattern, err) + } + if len(matches) == 0 { + if strings.ContainsAny(globPattern, "*?[]") { + log.Printf("[WARNING] No files matching import glob pattern: %s", importPattern) + } else { + return p.Errf("File to import not found: %s", importPattern) + } + } + + // collect all the imported tokens + + for _, importFile := range matches { + newTokens, err := p.doSingleImport(importFile) + if err != nil { + return err + } + importedTokens = append(importedTokens, newTokens...) + } + } + + // splice the imported tokens in the place of the import statement + // and rewind cursor so Next() will land on first imported token + p.tokens = append(tokensBefore, append(importedTokens, tokensAfter...)...) + p.cursor-- + + return nil +} + +// doSingleImport lexes the individual file at importFile and returns +// its tokens or an error, if any. +func (p *parser) doSingleImport(importFile string) ([]Token, error) { + file, err := os.Open(importFile) + if err != nil { + return nil, p.Errf("Could not import %s: %v", importFile, err) + } + defer file.Close() + + if info, err := file.Stat(); err != nil { + return nil, p.Errf("Could not import %s: %v", importFile, err) + } else if info.IsDir() { + return nil, p.Errf("Could not import %s: is a directory", importFile) + } + + importedTokens, err := allTokens(file) + if err != nil { + return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err) + } + + // Tack the file path onto these tokens so errors show the imported file's name + // (we use full, absolute path to avoid bugs: issue #1892) + filename, err := filepath.Abs(importFile) + if err != nil { + return nil, p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err) + } + for i := 0; i < len(importedTokens); i++ { + importedTokens[i].File = filename + } + + return importedTokens, nil +} + +// directive collects tokens until the directive's scope +// closes (either end of line or end of curly brace block). +// It expects the currently-loaded token to be a directive +// (or } that ends a server block). The collected tokens +// are loaded into the current server block for later use +// by directive setup functions. 
+func (p *parser) directive() error { + dir := replaceEnvVars(p.Val()) + nesting := 0 + + if !p.validDirective(dir) { + return p.Errf("Unknown directive '%s'", dir) + } + + // The directive itself is appended as a relevant token + p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor]) + + for p.Next() { + if p.Val() == "{" { + nesting++ + } else if p.isNewLine() && nesting == 0 { + p.cursor-- // read too far + break + } else if p.Val() == "}" && nesting > 0 { + nesting-- + } else if p.Val() == "}" && nesting == 0 { + return p.Err("Unexpected '}' because no matching opening brace") + } else if p.Val() == "import" && p.isNewLine() { + if err := p.doImport(); err != nil { + return err + } + p.cursor-- // cursor is advanced when we continue, so roll back one more + continue + } + p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text) + p.block.Tokens[dir] = append(p.block.Tokens[dir], p.tokens[p.cursor]) + } + + if nesting > 0 { + return p.EOFErr() + } + return nil +} + +// openCurlyBrace expects the current token to be an +// opening curly brace. This acts like an assertion +// because it returns an error if the token is not +// a opening curly brace. It does NOT advance the token. +func (p *parser) openCurlyBrace() error { + if p.Val() != "{" { + return p.SyntaxErr("{") + } + return nil +} + +// closeCurlyBrace expects the current token to be +// a closing curly brace. This acts like an assertion +// because it returns an error if the token is not +// a closing curly brace. It does NOT advance the token. +func (p *parser) closeCurlyBrace() error { + if p.Val() != "}" { + return p.SyntaxErr("}") + } + return nil +} + +// validDirective returns true if dir is in p.validDirectives. +func (p *parser) validDirective(dir string) bool { + if p.validDirectives == nil { + return true + } + for _, d := range p.validDirectives { + if d == dir { + return true + } + } + return false +} + +// replaceEnvVars replaces environment variables that appear in the token +// and understands both the $UNIX and %WINDOWS% syntaxes. +func replaceEnvVars(s string) string { + s = replaceEnvReferences(s, "{%", "%}") + s = replaceEnvReferences(s, "{$", "}") + return s +} + +// replaceEnvReferences performs the actual replacement of env variables +// in s, given the placeholder start and placeholder end strings. +func replaceEnvReferences(s, refStart, refEnd string) string { + index := strings.Index(s, refStart) + for index != -1 { + endIndex := strings.Index(s[index:], refEnd) + if endIndex == -1 { + break + } + + endIndex += index + if endIndex > index+len(refStart) { + ref := s[index : endIndex+len(refEnd)] + s = strings.Replace(s, ref, os.Getenv(ref[len(refStart):len(ref)-len(refEnd)]), -1) + } else { + return s + } + index = strings.Index(s, refStart) + } + return s +} + +// ServerBlock associates any number of keys (usually addresses +// of some sort) with tokens (grouped by directive name). +type ServerBlock struct { + Keys []string + Tokens map[string][]Token +} + +func (p *parser) isSnippet() (bool, string) { + keys := p.block.Keys + // A snippet block is a single key with parens. Nothing else qualifies. + if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") { + return true, strings.TrimSuffix(keys[0][1:], ")") + } + return false, "" +} + +// read and store everything in a block for later replay. +func (p *parser) snippetTokens() ([]Token, error) { + // snippet must have curlies. 
+ err := p.openCurlyBrace() + if err != nil { + return nil, err + } + count := 1 + tokens := []Token{} + for p.Next() { + if p.Val() == "}" { + count-- + if count == 0 { + break + } + } + if p.Val() == "{" { + count++ + } + tokens = append(tokens, p.tokens[p.cursor]) + } + // make sure we're matched up + if count != 0 { + return nil, p.SyntaxErr("}") + } + return tokens, nil +} diff --git a/caddyconfig/caddyfile/parse_test.go b/caddyconfig/caddyfile/parse_test.go new file mode 100755 index 0000000..654c68d --- /dev/null +++ b/caddyconfig/caddyfile/parse_test.go @@ -0,0 +1,718 @@ +// Copyright 2015 Light Code Labs, LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package caddyfile + +import ( + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +func TestAllTokens(t *testing.T) { + input := strings.NewReader("a b c\nd e") + expected := []string{"a", "b", "c", "d", "e"} + tokens, err := allTokens(input) + + if err != nil { + t.Fatalf("Expected no error, got %v", err) + } + if len(tokens) != len(expected) { + t.Fatalf("Expected %d tokens, got %d", len(expected), len(tokens)) + } + + for i, val := range expected { + if tokens[i].Text != val { + t.Errorf("Token %d should be '%s' but was '%s'", i, val, tokens[i].Text) + } + } +} + +func TestParseOneAndImport(t *testing.T) { + testParseOne := func(input string) (ServerBlock, error) { + p := testParser(input) + p.Next() // parseOne doesn't call Next() to start, so we must + err := p.parseOne() + return p.block, err + } + + for i, test := range []struct { + input string + shouldErr bool + keys []string + tokens map[string]int // map of directive name to number of tokens expected + }{ + {`localhost`, false, []string{ + "localhost", + }, map[string]int{}}, + + {`localhost + dir1`, false, []string{ + "localhost", + }, map[string]int{ + "dir1": 1, + }}, + + {`localhost:1234 + dir1 foo bar`, false, []string{ + "localhost:1234", + }, map[string]int{ + "dir1": 3, + }}, + + {`localhost { + dir1 + }`, false, []string{ + "localhost", + }, map[string]int{ + "dir1": 1, + }}, + + {`localhost:1234 { + dir1 foo bar + dir2 + }`, false, []string{ + "localhost:1234", + }, map[string]int{ + "dir1": 3, + "dir2": 1, + }}, + + {`http://localhost https://localhost + dir1 foo bar`, false, []string{ + "http://localhost", + "https://localhost", + }, map[string]int{ + "dir1": 3, + }}, + + {`http://localhost https://localhost { + dir1 foo bar + }`, false, []string{ + "http://localhost", + "https://localhost", + }, map[string]int{ + "dir1": 3, + }}, + + {`http://localhost, https://localhost { + dir1 foo bar + }`, false, []string{ + "http://localhost", + "https://localhost", + }, map[string]int{ + "dir1": 3, + }}, + + {`http://localhost, { + }`, true, []string{ + "http://localhost", + }, map[string]int{}}, + + {`host1:80, http://host2.com + dir1 foo bar + dir2 baz`, false, []string{ + "host1:80", + "http://host2.com", + }, map[string]int{ + "dir1": 3, + "dir2": 2, + }}, + + {`http://host1.com, + http://host2.com, + https://host3.com`, false, 
[]string{ + "http://host1.com", + "http://host2.com", + "https://host3.com", + }, map[string]int{}}, + + {`http://host1.com:1234, https://host2.com + dir1 foo { + bar baz + } + dir2`, false, []string{ + "http://host1.com:1234", + "https://host2.com", + }, map[string]int{ + "dir1": 6, + "dir2": 1, + }}, + + {`127.0.0.1 + dir1 { + bar baz + } + dir2 { + foo bar + }`, false, []string{ + "127.0.0.1", + }, map[string]int{ + "dir1": 5, + "dir2": 5, + }}, + + {`localhost + dir1 { + foo`, true, []string{ + "localhost", + }, map[string]int{ + "dir1": 3, + }}, + + {`localhost + dir1 { + }`, false, []string{ + "localhost", + }, map[string]int{ + "dir1": 3, + }}, + + {`localhost + dir1 { + } }`, true, []string{ + "localhost", + }, map[string]int{ + "dir1": 3, + }}, + + {`localhost + dir1 { + nested { + foo + } + } + dir2 foo bar`, false, []string{ + "localhost", + }, map[string]int{ + "dir1": 7, + "dir2": 3, + }}, + + {``, false, []string{}, map[string]int{}}, + + {`localhost + dir1 arg1 + import testdata/import_test1.txt`, false, []string{ + "localhost", + }, map[string]int{ + "dir1": 2, + "dir2": 3, + "dir3": 1, + }}, + + {`import testdata/import_test2.txt`, false, []string{ + "host1", + }, map[string]int{ + "dir1": 1, + "dir2": 2, + }}, + + {`import testdata/import_test1.txt testdata/import_test2.txt`, true, []string{}, map[string]int{}}, + + {`import testdata/not_found.txt`, true, []string{}, map[string]int{}}, + + {`""`, false, []string{}, map[string]int{}}, + + {``, false, []string{}, map[string]int{}}, + + // test cases found by fuzzing! + {`import }{$"`, true, []string{}, map[string]int{}}, + {`import /*/*.txt`, true, []string{}, map[string]int{}}, + {`import /???/?*?o`, true, []string{}, map[string]int{}}, + {`import /??`, true, []string{}, map[string]int{}}, + {`import /[a-z]`, true, []string{}, map[string]int{}}, + {`import {$}`, true, []string{}, map[string]int{}}, + {`import {%}`, true, []string{}, map[string]int{}}, + {`import {$$}`, true, []string{}, map[string]int{}}, + {`import {%%}`, true, []string{}, map[string]int{}}, + } { + result, err := testParseOne(test.input) + + if test.shouldErr && err == nil { + t.Errorf("Test %d: Expected an error, but didn't get one", i) + } + if !test.shouldErr && err != nil { + t.Errorf("Test %d: Expected no error, but got: %v", i, err) + } + + if len(result.Keys) != len(test.keys) { + t.Errorf("Test %d: Expected %d keys, got %d", + i, len(test.keys), len(result.Keys)) + continue + } + for j, addr := range result.Keys { + if addr != test.keys[j] { + t.Errorf("Test %d, key %d: Expected '%s', but was '%s'", + i, j, test.keys[j], addr) + } + } + + if len(result.Tokens) != len(test.tokens) { + t.Errorf("Test %d: Expected %d directives, had %d", + i, len(test.tokens), len(result.Tokens)) + continue + } + for directive, tokens := range result.Tokens { + if len(tokens) != test.tokens[directive] { + t.Errorf("Test %d, directive '%s': Expected %d tokens, counted %d", + i, directive, test.tokens[directive], len(tokens)) + continue + } + } + } +} + +func TestRecursiveImport(t *testing.T) { + testParseOne := func(input string) (ServerBlock, error) { + p := testParser(input) + p.Next() // parseOne doesn't call Next() to start, so we must + err := p.parseOne() + return p.block, err + } + + isExpected := func(got ServerBlock) bool { + if len(got.Keys) != 1 || got.Keys[0] != "localhost" { + t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys) + return false + } + if len(got.Tokens) != 2 { + t.Errorf("got wrong number of tokens: expect 2, got %d", 
len(got.Tokens)) + return false + } + if len(got.Tokens["dir1"]) != 1 || len(got.Tokens["dir2"]) != 2 { + t.Errorf("got unexpect tokens: %v", got.Tokens) + return false + } + return true + } + + recursiveFile1, err := filepath.Abs("testdata/recursive_import_test1") + if err != nil { + t.Fatal(err) + } + recursiveFile2, err := filepath.Abs("testdata/recursive_import_test2") + if err != nil { + t.Fatal(err) + } + + // test relative recursive import + err = ioutil.WriteFile(recursiveFile1, []byte( + `localhost + dir1 + import recursive_import_test2`), 0644) + if err != nil { + t.Fatal(err) + } + defer os.Remove(recursiveFile1) + + err = ioutil.WriteFile(recursiveFile2, []byte("dir2 1"), 0644) + if err != nil { + t.Fatal(err) + } + defer os.Remove(recursiveFile2) + + // import absolute path + result, err := testParseOne("import " + recursiveFile1) + if err != nil { + t.Fatal(err) + } + if !isExpected(result) { + t.Error("absolute+relative import failed") + } + + // import relative path + result, err = testParseOne("import testdata/recursive_import_test1") + if err != nil { + t.Fatal(err) + } + if !isExpected(result) { + t.Error("relative+relative import failed") + } + + // test absolute recursive import + err = ioutil.WriteFile(recursiveFile1, []byte( + `localhost + dir1 + import `+recursiveFile2), 0644) + if err != nil { + t.Fatal(err) + } + + // import absolute path + result, err = testParseOne("import " + recursiveFile1) + if err != nil { + t.Fatal(err) + } + if !isExpected(result) { + t.Error("absolute+absolute import failed") + } + + // import relative path + result, err = testParseOne("import testdata/recursive_import_test1") + if err != nil { + t.Fatal(err) + } + if !isExpected(result) { + t.Error("relative+absolute import failed") + } +} + +func TestDirectiveImport(t *testing.T) { + testParseOne := func(input string) (ServerBlock, error) { + p := testParser(input) + p.Next() // parseOne doesn't call Next() to start, so we must + err := p.parseOne() + return p.block, err + } + + isExpected := func(got ServerBlock) bool { + if len(got.Keys) != 1 || got.Keys[0] != "localhost" { + t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys) + return false + } + if len(got.Tokens) != 2 { + t.Errorf("got wrong number of tokens: expect 2, got %d", len(got.Tokens)) + return false + } + if len(got.Tokens["dir1"]) != 1 || len(got.Tokens["proxy"]) != 8 { + t.Errorf("got unexpect tokens: %v", got.Tokens) + return false + } + return true + } + + directiveFile, err := filepath.Abs("testdata/directive_import_test") + if err != nil { + t.Fatal(err) + } + + err = ioutil.WriteFile(directiveFile, []byte(`prop1 1 + prop2 2`), 0644) + if err != nil { + t.Fatal(err) + } + defer os.Remove(directiveFile) + + // import from existing file + result, err := testParseOne(`localhost + dir1 + proxy { + import testdata/directive_import_test + transparent + }`) + if err != nil { + t.Fatal(err) + } + if !isExpected(result) { + t.Error("directive import failed") + } + + // import from nonexistent file + _, err = testParseOne(`localhost + dir1 + proxy { + import testdata/nonexistent_file + transparent + }`) + if err == nil { + t.Fatal("expected error when importing a nonexistent file") + } +} + +func TestParseAll(t *testing.T) { + for i, test := range []struct { + input string + shouldErr bool + keys [][]string // keys per server block, in order + }{ + {`localhost`, false, [][]string{ + {"localhost"}, + }}, + + {`localhost:1234`, false, [][]string{ + {"localhost:1234"}, + }}, + + {`localhost:1234 { + } + 
localhost:2015 { + }`, false, [][]string{ + {"localhost:1234"}, + {"localhost:2015"}, + }}, + + {`localhost:1234, http://host2`, false, [][]string{ + {"localhost:1234", "http://host2"}, + }}, + + {`localhost:1234, http://host2,`, true, [][]string{}}, + + {`http://host1.com, http://host2.com { + } + https://host3.com, https://host4.com { + }`, false, [][]string{ + {"http://host1.com", "http://host2.com"}, + {"https://host3.com", "https://host4.com"}, + }}, + + {`import testdata/import_glob*.txt`, false, [][]string{ + {"glob0.host0"}, + {"glob0.host1"}, + {"glob1.host0"}, + {"glob2.host0"}, + }}, + + {`import notfound/*`, false, [][]string{}}, // glob needn't error with no matches + {`import notfound/file.conf`, true, [][]string{}}, // but a specific file should + } { + p := testParser(test.input) + blocks, err := p.parseAll() + + if test.shouldErr && err == nil { + t.Errorf("Test %d: Expected an error, but didn't get one", i) + } + if !test.shouldErr && err != nil { + t.Errorf("Test %d: Expected no error, but got: %v", i, err) + } + + if len(blocks) != len(test.keys) { + t.Errorf("Test %d: Expected %d server blocks, got %d", + i, len(test.keys), len(blocks)) + continue + } + for j, block := range blocks { + if len(block.Keys) != len(test.keys[j]) { + t.Errorf("Test %d: Expected %d keys in block %d, got %d", + i, len(test.keys[j]), j, len(block.Keys)) + continue + } + for k, addr := range block.Keys { + if addr != test.keys[j][k] { + t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'", + i, j, k, test.keys[j][k], addr) + } + } + } + } +} + +func TestEnvironmentReplacement(t *testing.T) { + os.Setenv("PORT", "8080") + os.Setenv("ADDRESS", "servername.com") + os.Setenv("FOOBAR", "foobar") + os.Setenv("PARTIAL_DIR", "r1") + + // basic test; unix-style env vars + p := testParser(`{$ADDRESS}`) + blocks, _ := p.parseAll() + if actual, expected := blocks[0].Keys[0], "servername.com"; expected != actual { + t.Errorf("Expected key to be '%s' but was '%s'", expected, actual) + } + + // basic test; unix-style env vars + p = testParser(`di{$PARTIAL_DIR}`) + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Keys[0], "dir1"; expected != actual { + t.Errorf("Expected key to be '%s' but was '%s'", expected, actual) + } + + // multiple vars per token + p = testParser(`{$ADDRESS}:{$PORT}`) + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual { + t.Errorf("Expected key to be '%s' but was '%s'", expected, actual) + } + + // windows-style var and unix style in same token + p = testParser(`{%ADDRESS%}:{$PORT}`) + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual { + t.Errorf("Expected key to be '%s' but was '%s'", expected, actual) + } + + // reverse order + p = testParser(`{$ADDRESS}:{%PORT%}`) + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual { + t.Errorf("Expected key to be '%s' but was '%s'", expected, actual) + } + + // env var in server block body as argument + p = testParser(":{%PORT%}\ndir1 {$FOOBAR}") + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Keys[0], ":8080"; expected != actual { + t.Errorf("Expected key to be '%s' but was '%s'", expected, actual) + } + if actual, expected := blocks[0].Tokens["dir1"][1].Text, "foobar"; expected != actual { + t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) + } + + // combined windows env vars in argument + p = 
testParser(":{%PORT%}\ndir1 {%ADDRESS%}/{%FOOBAR%}") + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Tokens["dir1"][1].Text, "servername.com/foobar"; expected != actual { + t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) + } + + // malformed env var (windows) + p = testParser(":1234\ndir1 {%ADDRESS}") + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Tokens["dir1"][1].Text, "{%ADDRESS}"; expected != actual { + t.Errorf("Expected host to be '%s' but was '%s'", expected, actual) + } + + // malformed (non-existent) env var (unix) + p = testParser(`:{$PORT$}`) + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Keys[0], ":"; expected != actual { + t.Errorf("Expected key to be '%s' but was '%s'", expected, actual) + } + + // in quoted field + p = testParser(":1234\ndir1 \"Test {$FOOBAR} test\"") + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Tokens["dir1"][1].Text, "Test foobar test"; expected != actual { + t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) + } + + // after end token + p = testParser(":1234\nanswer \"{{ .Name }} {$FOOBAR}\"") + blocks, _ = p.parseAll() + if actual, expected := blocks[0].Tokens["answer"][1].Text, "{{ .Name }} foobar"; expected != actual { + t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) + } +} + +func testParser(input string) parser { + return parser{Dispenser: newTestDispenser(input)} +} + +func TestSnippets(t *testing.T) { + p := testParser(` + (common) { + gzip foo + errors stderr + } + http://example.com { + import common + } + `) + blocks, err := p.parseAll() + if err != nil { + t.Fatal(err) + } + for _, b := range blocks { + t.Log(b.Keys) + t.Log(b.Tokens) + } + if len(blocks) != 1 { + t.Fatalf("Expect exactly one server block. Got %d.", len(blocks)) + } + if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual { + t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual) + } + if len(blocks[0].Tokens) != 2 { + t.Fatalf("Server block should have tokens from import") + } + if actual, expected := blocks[0].Tokens["gzip"][0].Text, "gzip"; expected != actual { + t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) + } + if actual, expected := blocks[0].Tokens["errors"][1].Text, "stderr"; expected != actual { + t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) + } + +} + +func writeStringToTempFileOrDie(t *testing.T, str string) (pathToFile string) { + file, err := ioutil.TempFile("", t.Name()) + if err != nil { + panic(err) // get a stack trace so we know where this was called from. + } + if _, err := file.WriteString(str); err != nil { + panic(err) + } + if err := file.Close(); err != nil { + panic(err) + } + return file.Name() +} + +func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) { + fileName := writeStringToTempFileOrDie(t, ` + http://example.com { + # This isn't an import directive, it's just an arg with value 'import' + basicauth / import password + } + `) + // Parse the root file that imports the other one. 
+ p := testParser(`import ` + fileName) + blocks, err := p.parseAll() + if err != nil { + t.Fatal(err) + } + for _, b := range blocks { + t.Log(b.Keys) + t.Log(b.Tokens) + } + auth := blocks[0].Tokens["basicauth"] + line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text + if line != "basicauth / import password" { + // Previously, it would be changed to: + // basicauth / import /path/to/test/dir/password + // referencing a file that (probably) doesn't exist and changing the + // password! + t.Errorf("Expected basicauth tokens to be 'basicauth / import password' but got %#q", line) + } +} + +func TestSnippetAcrossMultipleFiles(t *testing.T) { + // Make the derived Caddyfile that expects (common) to be defined. + fileName := writeStringToTempFileOrDie(t, ` + http://example.com { + import common + } + `) + + // Parse the root file that defines (common) and then imports the other one. + p := testParser(` + (common) { + gzip foo + } + import ` + fileName + ` + `) + + blocks, err := p.parseAll() + if err != nil { + t.Fatal(err) + } + for _, b := range blocks { + t.Log(b.Keys) + t.Log(b.Tokens) + } + if len(blocks) != 1 { + t.Fatalf("Expect exactly one server block. Got %d.", len(blocks)) + } + if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual { + t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual) + } + if len(blocks[0].Tokens) != 1 { + t.Fatalf("Server block should have tokens from import") + } + if actual, expected := blocks[0].Tokens["gzip"][0].Text, "gzip"; expected != actual { + t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual) + } +} diff --git a/caddyconfig/caddyfile/testdata/import_glob0.txt b/caddyconfig/caddyfile/testdata/import_glob0.txt new file mode 100755 index 0000000..e610b5e --- /dev/null +++ b/caddyconfig/caddyfile/testdata/import_glob0.txt @@ -0,0 +1,6 @@ +glob0.host0 { + dir2 arg1 +} + +glob0.host1 { +} diff --git a/caddyconfig/caddyfile/testdata/import_glob1.txt b/caddyconfig/caddyfile/testdata/import_glob1.txt new file mode 100755 index 0000000..111eb04 --- /dev/null +++ b/caddyconfig/caddyfile/testdata/import_glob1.txt @@ -0,0 +1,4 @@ +glob1.host0 { + dir1 + dir2 arg1 +} diff --git a/caddyconfig/caddyfile/testdata/import_glob2.txt b/caddyconfig/caddyfile/testdata/import_glob2.txt new file mode 100755 index 0000000..c09f784 --- /dev/null +++ b/caddyconfig/caddyfile/testdata/import_glob2.txt @@ -0,0 +1,3 @@ +glob2.host0 { + dir2 arg1 +} diff --git a/caddyconfig/caddyfile/testdata/import_test1.txt b/caddyconfig/caddyfile/testdata/import_test1.txt new file mode 100755 index 0000000..dac7b29 --- /dev/null +++ b/caddyconfig/caddyfile/testdata/import_test1.txt @@ -0,0 +1,2 @@ +dir2 arg1 arg2 +dir3
\ No newline at end of file diff --git a/caddyconfig/caddyfile/testdata/import_test2.txt b/caddyconfig/caddyfile/testdata/import_test2.txt new file mode 100755 index 0000000..140c879 --- /dev/null +++ b/caddyconfig/caddyfile/testdata/import_test2.txt @@ -0,0 +1,4 @@ +host1 { + dir1 + dir2 arg1 +}
\ No newline at end of file diff --git a/caddyconfig/configadapters.go b/caddyconfig/configadapters.go new file mode 100644 index 0000000..6e5d530 --- /dev/null +++ b/caddyconfig/configadapters.go @@ -0,0 +1,113 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package caddyconfig + +import ( + "encoding/json" + "fmt" +) + +// Adapter is a type which can adapt a configuration to Caddy JSON. +// It returns the results and any warnings, or an error. +type Adapter interface { + Adapt(body []byte, options map[string]string) ([]byte, []Warning, error) +} + +// Warning represents a warning or notice related to conversion. +type Warning struct { + File string + Line int + Directive string + Message string +} + +// JSON encodes val as JSON, returning it as a json.RawMessage. Any +// marshaling errors (which are highly unlikely with correct code) +// are converted to warnings. This is convenient when filling config +// structs that require a json.RawMessage, without having to worry +// about errors. +func JSON(val interface{}, warnings *[]Warning) json.RawMessage { + b, err := json.Marshal(val) + if err != nil { + if warnings != nil { + *warnings = append(*warnings, Warning{Message: err.Error()}) + } + return nil + } + return b +} + +// JSONModuleObject is like JSON, except it marshals val into a JSON object +// and then adds a key to that object named fieldName with the value fieldVal. +// This is useful for JSON-encoding module values where the module name has to +// be described within the object by a certain key; for example, +// "responder": "file_server" for a file server HTTP responder. The val must +// encode into a map[string]interface{} (i.e. it must be a struct or map), +// and any errors are converted into warnings, so this can be conveniently +// used when filling a struct. For correct code, there should be no errors. +func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage { + // encode to a JSON object first + enc, err := json.Marshal(val) + if err != nil { + if warnings != nil { + *warnings = append(*warnings, Warning{Message: err.Error()}) + } + return nil + } + + // then decode the object + var tmp map[string]interface{} + err = json.Unmarshal(enc, &tmp) + if err != nil { + if warnings != nil { + *warnings = append(*warnings, Warning{Message: err.Error()}) + } + return nil + } + + // so we can easily add the module's field with its appointed value + tmp[fieldName] = fieldVal + + // then re-marshal as JSON + result, err := json.Marshal(tmp) + if err != nil { + if warnings != nil { + *warnings = append(*warnings, Warning{Message: err.Error()}) + } + return nil + } + + return result +} + +// JSONIndent is used to JSON-marshal the final resulting Caddy +// configuration in a consistent, human-readable way. 
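+// It indents with tabs (via json.MarshalIndent).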
+func JSONIndent(val interface{}) ([]byte, error) { + return json.MarshalIndent(val, "", "\t") +} + +func RegisterAdapter(name string, adapter Adapter) error { + if _, ok := configAdapters[name]; ok { + return fmt.Errorf("%s: already registered", name) + } + configAdapters[name] = adapter + return nil +} + +func GetAdapter(name string) Adapter { + return configAdapters[name] +} + +var configAdapters = make(map[string]Adapter) diff --git a/caddyconfig/httpcaddyfile/addresses.go b/caddyconfig/httpcaddyfile/addresses.go new file mode 100644 index 0000000..6ecee26 --- /dev/null +++ b/caddyconfig/httpcaddyfile/addresses.go @@ -0,0 +1,350 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httpcaddyfile + +import ( + "fmt" + "net" + "net/url" + "reflect" + "strconv" + "strings" + + "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" + "github.com/caddyserver/caddy/v2/modules/caddyhttp" + "github.com/mholt/certmagic" +) + +// mapAddressToServerBlocks returns a map of listener address to list of server +// blocks that will be served on that address. To do this, each server block is +// expanded so that each one is considered individually, although keys of a +// server block that share the same address stay grouped together so the config +// isn't repeated unnecessarily. For example, this Caddyfile: +// +// example.com { +// bind 127.0.0.1 +// } +// www.example.com, example.net/path, localhost:9999 { +// bind 127.0.0.1 1.2.3.4 +// } +// +// has two server blocks to start with. But expressed in this Caddyfile are +// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999, +// and 127.0.0.1:9999. This is because the bind directive is applied to each +// key of its server block (specifying the host part), and each key may have +// a different port. And we definitely need to be sure that a site which is +// bound to be served on a specific interface is not served on others just +// beceause that is more convenient: it would be a potential security risk +// if the difference between interfaces means private vs. public. +// +// So what this function does for the example above is iterate each server +// block, and for each server block, iterate its keys. For the first, it +// finds one key (example.com) and determines its listener address +// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds +// the listener address to the map value returned by this function, with +// the first server block as one of its associations. +// +// It then iterates each key on the second server block and associates them +// with one or more listener addresses. Indeed, each key in this block has +// two listener addresses because of the 'bind' directive. Once we know +// which addresses serve which keys, we can create a new server block for +// each address containing the contents of the server block and only those +// specific keys of the server block which use that address. 
+// +// It is possible and even likely that some keys in the returned map have +// the exact same list of server blocks (i.e. they are identical). This +// happens when multiple hosts are declared with a 'bind' directive and +// the resulting listener addresses are not shared by any other server +// block (or the other server blocks are exactly identical in their token +// contents). This happens with our example above because 1.2.3.4:443 +// and 1.2.3.4:9999 are used exclusively with the second server block. This +// repetition may be undesirable, so call consolidateAddrMappings() to map +// multiple addresses to the same lists of server blocks (a many:many mapping). +// (Doing this is essentially a map-reduce technique.) +func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []caddyfile.ServerBlock) (map[string][]caddyfile.ServerBlock, error) { + sbmap := make(map[string][]caddyfile.ServerBlock) + + for i, sblock := range originalServerBlocks { + // within a server block, we need to map all the listener addresses + // implied by the server block to the keys of the server block which + // will be served by them; this has the effect of treating each + // key of a server block as its own, but without having to repeat its + // contents in cases where multiple keys really can be served together + addrToKeys := make(map[string][]string) + for j, key := range sblock.Keys { + // a key can have multiple listener addresses if there are multiple + // arguments to the 'bind' directive (although they will all have + // the same port, since the port is defined by the key or is implicit + // through automatic HTTPS) + addrs, err := st.listenerAddrsForServerBlockKey(sblock, key) + if err != nil { + return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err) + } + + // associate this key with each listener address it is served on + for _, addr := range addrs { + addrToKeys[addr] = append(addrToKeys[addr], key) + } + } + + // now that we know which addresses serve which keys of this + // server block, we iterate that mapping and create a list of + // new server blocks for each address where the keys of the + // server block are only the ones which use the address; but + // the contents (tokens) are of course the same + for addr, keys := range addrToKeys { + sbmap[addr] = append(sbmap[addr], caddyfile.ServerBlock{ + Keys: keys, + Tokens: sblock.Tokens, + }) + } + } + + return sbmap, nil +} + +// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of +// single listener addresses to lists of server blocks. Since multiple addresses may serve +// identical sites (server block contents), this function turns a 1:many mapping into a +// many:many mapping. Server block contents (tokens) must be exactly identical so that +// reflect.DeepEqual returns true in order for the addresses to be combined. Identical +// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each +// association from multiple addresses to multiple server blocks; i.e. each element of +// the returned slice) becomes a server definition in the output JSON. 
+func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]caddyfile.ServerBlock) []sbAddrAssociation { + var sbaddrs []sbAddrAssociation + for addr, sblocks := range addrToServerBlocks { + // we start with knowing that at least this address + // maps to these server blocks + a := sbAddrAssociation{ + addresses: []string{addr}, + serverBlocks: sblocks, + } + + // now find other addresses that map to identical + // server blocks and add them to our list of + // addresses, while removing them from the map + for otherAddr, otherSblocks := range addrToServerBlocks { + if addr == otherAddr { + continue + } + if reflect.DeepEqual(sblocks, otherSblocks) { + a.addresses = append(a.addresses, otherAddr) + delete(addrToServerBlocks, otherAddr) + } + } + + sbaddrs = append(sbaddrs, a) + } + return sbaddrs +} + +func (st *ServerType) listenerAddrsForServerBlockKey(sblock caddyfile.ServerBlock, key string) ([]string, error) { + addr, err := standardizeAddress(key) + if err != nil { + return nil, fmt.Errorf("parsing key: %v", err) + } + + lnPort := defaultPort + if addr.Port != "" { + // port explicitly defined + lnPort = addr.Port + } else if certmagic.HostQualifies(addr.Host) { + // automatic HTTPS + lnPort = strconv.Itoa(certmagic.HTTPSPort) + } + + // the bind directive specifies hosts, but is optional + var lnHosts []string + for i, token := range sblock.Tokens["bind"] { + if i == 0 { + continue + } + lnHosts = append(lnHosts, token.Text) + } + if len(lnHosts) == 0 { + lnHosts = []string{""} + } + + // use a map to prevent duplication + listeners := make(map[string]struct{}) + for _, host := range lnHosts { + listeners[net.JoinHostPort(host, lnPort)] = struct{}{} + } + + // now turn map into list + var listenersList []string + for lnStr := range listeners { + listenersList = append(listenersList, lnStr) + } + // sort.Strings(listenersList) // TODO: is sorting necessary? + + return listenersList, nil +} + +// Address represents a site address. It contains +// the original input value, and the component +// parts of an address. The component parts may be +// updated to the correct values as setup proceeds, +// but the original value should never be changed. +// +// The Host field must be in a normalized form. +type Address struct { + Original, Scheme, Host, Port, Path string +} + +// String returns a human-friendly print of the address. +func (a Address) String() string { + if a.Host == "" && a.Port == "" { + return "" + } + scheme := a.Scheme + if scheme == "" { + if a.Port == strconv.Itoa(certmagic.HTTPSPort) { + scheme = "https" + } else { + scheme = "http" + } + } + s := scheme + if s != "" { + s += "://" + } + if a.Port != "" && + ((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) || + (scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) { + s += net.JoinHostPort(a.Host, a.Port) + } else { + s += a.Host + } + if a.Path != "" { + s += a.Path + } + return s +} + +// VHost returns a sensible concatenation of Host:Port/Path from a. +// It's basically the a.Original but without the scheme. 
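+// For example, "https://example.com/foo" yields "example.com/foo".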
+func (a Address) VHost() string { + if idx := strings.Index(a.Original, "://"); idx > -1 { + return a.Original[idx+3:] + } + return a.Original +} + +// Normalize normalizes URL: turn scheme and host names into lower case +func (a Address) Normalize() Address { + path := a.Path + if !caseSensitivePath { + path = strings.ToLower(path) + } + + // ensure host is normalized if it's an IP address + host := a.Host + if ip := net.ParseIP(host); ip != nil { + host = ip.String() + } + + return Address{ + Original: a.Original, + Scheme: strings.ToLower(a.Scheme), + Host: strings.ToLower(host), + Port: a.Port, + Path: path, + } +} + +// Key is similar to String, just replaces scheme and host values with modified values. +// Unlike String it doesn't add anything default (scheme, port, etc) +func (a Address) Key() string { + res := "" + if a.Scheme != "" { + res += a.Scheme + "://" + } + if a.Host != "" { + res += a.Host + } + if a.Port != "" { + if strings.HasPrefix(a.Original[len(res):], ":"+a.Port) { + // insert port only if the original has its own explicit port + res += ":" + a.Port + } + } + if a.Path != "" { + res += a.Path + } + return res +} + +// standardizeAddress parses an address string into a structured format with separate +// scheme, host, port, and path portions, as well as the original input string. +func standardizeAddress(str string) (Address, error) { + httpPort, httpsPort := strconv.Itoa(certmagic.HTTPPort), strconv.Itoa(certmagic.HTTPSPort) + + input := str + + // Split input into components (prepend with // to assert host by default) + if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") { + str = "//" + str + } + u, err := url.Parse(str) + if err != nil { + return Address{}, err + } + + // separate host and port + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + host, port, err = net.SplitHostPort(u.Host + ":") + if err != nil { + host = u.Host + } + } + + // see if we can set port based off scheme + if port == "" { + if u.Scheme == "http" { + port = httpPort + } else if u.Scheme == "https" { + port = httpsPort + } + } + + // repeated or conflicting scheme is confusing, so error + if u.Scheme != "" && (port == "http" || port == "https") { + return Address{}, fmt.Errorf("[%s] scheme specified twice in address", input) + } + + // error if scheme and port combination violate convention + if (u.Scheme == "http" && port == httpsPort) || (u.Scheme == "https" && port == httpPort) { + return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input) + } + + // standardize http and https ports to their respective port numbers + if port == "http" { + u.Scheme = "http" + port = httpPort + } else if port == "https" { + u.Scheme = "https" + port = httpsPort + } + + return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err +} + +const ( + defaultPort = "2015" + caseSensitivePath = false +) diff --git a/caddyconfig/httpcaddyfile/addresses_test.go b/caddyconfig/httpcaddyfile/addresses_test.go new file mode 100644 index 0000000..7e03d29 --- /dev/null +++ b/caddyconfig/httpcaddyfile/addresses_test.go @@ -0,0 +1,129 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httpcaddyfile + +import "testing" + +func TestStandardizeAddress(t *testing.T) { + for i, test := range []struct { + input string + scheme, host, port, path string + shouldErr bool + }{ + {`localhost`, "", "localhost", "", "", false}, + {`localhost:1234`, "", "localhost", "1234", "", false}, + {`localhost:`, "", "localhost", "", "", false}, + {`0.0.0.0`, "", "0.0.0.0", "", "", false}, + {`127.0.0.1:1234`, "", "127.0.0.1", "1234", "", false}, + {`:1234`, "", "", "1234", "", false}, + {`[::1]`, "", "::1", "", "", false}, + {`[::1]:1234`, "", "::1", "1234", "", false}, + {`:`, "", "", "", "", false}, + {`localhost:http`, "http", "localhost", "80", "", false}, + {`localhost:https`, "https", "localhost", "443", "", false}, + {`:http`, "http", "", "80", "", false}, + {`:https`, "https", "", "443", "", false}, + {`http://localhost:https`, "", "", "", "", true}, // conflict + {`http://localhost:http`, "", "", "", "", true}, // repeated scheme + {`http://localhost:443`, "", "", "", "", true}, // not conventional + {`https://localhost:80`, "", "", "", "", true}, // not conventional + {`http://localhost`, "http", "localhost", "80", "", false}, + {`https://localhost`, "https", "localhost", "443", "", false}, + {`http://127.0.0.1`, "http", "127.0.0.1", "80", "", false}, + {`https://127.0.0.1`, "https", "127.0.0.1", "443", "", false}, + {`http://[::1]`, "http", "::1", "80", "", false}, + {`http://localhost:1234`, "http", "localhost", "1234", "", false}, + {`https://127.0.0.1:1234`, "https", "127.0.0.1", "1234", "", false}, + {`http://[::1]:1234`, "http", "::1", "1234", "", false}, + {``, "", "", "", "", false}, + {`::1`, "", "::1", "", "", true}, + {`localhost::`, "", "localhost::", "", "", true}, + {`#$%@`, "", "", "", "", true}, + {`host/path`, "", "host", "", "/path", false}, + {`http://host/`, "http", "host", "80", "/", false}, + {`//asdf`, "", "asdf", "", "", false}, + {`:1234/asdf`, "", "", "1234", "/asdf", false}, + {`http://host/path`, "http", "host", "80", "/path", false}, + {`https://host:443/path/foo`, "https", "host", "443", "/path/foo", false}, + {`host:80/path`, "", "host", "80", "/path", false}, + {`host:https/path`, "https", "host", "443", "/path", false}, + {`/path`, "", "", "", "/path", false}, + } { + actual, err := standardizeAddress(test.input) + + if err != nil && !test.shouldErr { + t.Errorf("Test %d (%s): Expected no error, but had error: %v", i, test.input, err) + } + if err == nil && test.shouldErr { + t.Errorf("Test %d (%s): Expected error, but had none", i, test.input) + } + + if !test.shouldErr && actual.Original != test.input { + t.Errorf("Test %d (%s): Expected original '%s', got '%s'", i, test.input, test.input, actual.Original) + } + if actual.Scheme != test.scheme { + t.Errorf("Test %d (%s): Expected scheme '%s', got '%s'", i, test.input, test.scheme, actual.Scheme) + } + if actual.Host != test.host { + t.Errorf("Test %d (%s): Expected host '%s', got '%s'", i, test.input, test.host, actual.Host) + } + if actual.Port != test.port { + t.Errorf("Test %d (%s): Expected port '%s', got '%s'", i, test.input, test.port, actual.Port) + } + if 
actual.Path != test.path { + t.Errorf("Test %d (%s): Expected path '%s', got '%s'", i, test.input, test.path, actual.Path) + } + } +} + +func TestAddressVHost(t *testing.T) { + for i, test := range []struct { + addr Address + expected string + }{ + {Address{Original: "host:1234"}, "host:1234"}, + {Address{Original: "host:1234/foo"}, "host:1234/foo"}, + {Address{Original: "host/foo"}, "host/foo"}, + {Address{Original: "http://host/foo"}, "host/foo"}, + {Address{Original: "https://host/foo"}, "host/foo"}, + } { + actual := test.addr.VHost() + if actual != test.expected { + t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual) + } + } +} + +func TestAddressString(t *testing.T) { + for i, test := range []struct { + addr Address + expected string + }{ + {Address{Scheme: "http", Host: "host", Port: "1234", Path: "/path"}, "http://host:1234/path"}, + {Address{Scheme: "", Host: "host", Port: "", Path: ""}, "http://host"}, + {Address{Scheme: "", Host: "host", Port: "80", Path: ""}, "http://host"}, + {Address{Scheme: "", Host: "host", Port: "443", Path: ""}, "https://host"}, + {Address{Scheme: "https", Host: "host", Port: "443", Path: ""}, "https://host"}, + {Address{Scheme: "https", Host: "host", Port: "", Path: ""}, "https://host"}, + {Address{Scheme: "", Host: "host", Port: "80", Path: "/path"}, "http://host/path"}, + {Address{Scheme: "http", Host: "", Port: "1234", Path: ""}, "http://:1234"}, + {Address{Scheme: "", Host: "", Port: "", Path: ""}, ""}, + } { + actual := test.addr.String() + if actual != test.expected { + t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual) + } + } +} diff --git a/caddyconfig/httpcaddyfile/builtins.go b/caddyconfig/httpcaddyfile/builtins.go new file mode 100644 index 0000000..7e51e46 --- /dev/null +++ b/caddyconfig/httpcaddyfile/builtins.go @@ -0,0 +1,257 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package httpcaddyfile + +import ( + "encoding/json" + "fmt" + "html" + "net/http" + + "github.com/caddyserver/caddy/v2/caddyconfig" + "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" + "github.com/caddyserver/caddy/v2/modules/caddyhttp" + "github.com/caddyserver/caddy/v2/modules/caddytls" +) + +func (st *ServerType) parseRoot( + tkns []caddyfile.Token, + matcherDefs map[string]map[string]json.RawMessage, + warnings *[]caddyconfig.Warning, +) ([]caddyhttp.Route, error) { + var routes []caddyhttp.Route + + matchersAndTokens, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings) + if err != nil { + return nil, err + } + + for _, mst := range matchersAndTokens { + d := caddyfile.NewDispenser("Caddyfile", mst.tokens) + + var root string + for d.Next() { + if !d.NextArg() { + return nil, d.ArgErr() + } + root = d.Val() + if d.NextArg() { + return nil, d.ArgErr() + } + } + + varsHandler := caddyhttp.VarsMiddleware{"root": root} + route := caddyhttp.Route{ + Handle: []json.RawMessage{ + caddyconfig.JSONModuleObject(varsHandler, "handler", "vars", warnings), + }, + } + if mst.matcherSet != nil { + route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet} + } + + routes = append(routes, route) + } + + return routes, nil +} + +func (st *ServerType) parseRedir( + tkns []caddyfile.Token, + matcherDefs map[string]map[string]json.RawMessage, + warnings *[]caddyconfig.Warning, +) ([]caddyhttp.Route, error) { + var routes []caddyhttp.Route + + matchersAndTokens, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings) + if err != nil { + return nil, err + } + + for _, mst := range matchersAndTokens { + var route caddyhttp.Route + + d := caddyfile.NewDispenser("Caddyfile", mst.tokens) + + for d.Next() { + if !d.NextArg() { + return nil, d.ArgErr() + } + to := d.Val() + + var code string + if d.NextArg() { + code = d.Val() + } + if code == "permanent" { + code = "301" + } + if code == "temporary" || code == "" { + code = "307" + } + var body string + if code == "meta" { + // Script tag comes first since that will better imitate a redirect in the browser's + // history, but the meta tag is a fallback for most non-JS clients. 
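+			// each %s below is filled with the HTML-escaped redirect target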
+ const metaRedir = `<!DOCTYPE html> +<html> + <head> + <title>Redirecting...</title> + <script>window.location.replace("%s");</script> + <meta http-equiv="refresh" content="0; URL='%s'"> + </head> + <body>Redirecting to <a href="%s">%s</a>...</body> +</html> +` + safeTo := html.EscapeString(to) + body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo) + } + + handler := caddyhttp.StaticResponse{ + StatusCode: caddyhttp.WeakString(code), + Headers: http.Header{"Location": []string{to}}, + Body: body, + } + + route.Handle = append(route.Handle, + caddyconfig.JSONModuleObject(handler, "handler", "static_response", warnings)) + } + + if mst.matcherSet != nil { + route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet} + } + + routes = append(routes, route) + } + + return routes, nil +} + +func (st *ServerType) parseTLSAutomationManager(d *caddyfile.Dispenser) (caddytls.ACMEManagerMaker, error) { + var m caddytls.ACMEManagerMaker + + for d.Next() { + firstLine := d.RemainingArgs() + if len(firstLine) == 1 && firstLine[0] != "off" { + m.Email = firstLine[0] + } + + var hasBlock bool + for d.NextBlock() { + hasBlock = true + switch d.Val() { + case "ca": + arg := d.RemainingArgs() + if len(arg) != 1 { + return m, d.ArgErr() + } + m.CA = arg[0] + // TODO: other properties + } + } + + // a naked tls directive is not allowed + if len(firstLine) == 0 && !hasBlock { + return m, d.ArgErr() + } + } + + return m, nil +} + +func (st *ServerType) parseTLSCerts(d *caddyfile.Dispenser) (map[string]caddytls.CertificateLoader, error) { + var fileLoader caddytls.FileLoader + var folderLoader caddytls.FolderLoader + + for d.Next() { + // file loader + firstLine := d.RemainingArgs() + if len(firstLine) == 2 { + fileLoader = append(fileLoader, caddytls.CertKeyFilePair{ + Certificate: firstLine[0], + Key: firstLine[1], + // TODO: tags, for enterprise module's certificate selection + }) + } + + // folder loader + for d.NextBlock() { + if d.Val() == "load" { + folderLoader = append(folderLoader, d.RemainingArgs()...) 
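+				// note: a single "load" line may name several folders; they all accumulate into one loader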
+ } + } + } + + // put configured loaders into the map + loaders := make(map[string]caddytls.CertificateLoader) + if len(fileLoader) > 0 { + loaders["load_files"] = fileLoader + } + if len(folderLoader) > 0 { + loaders["load_folders"] = folderLoader + } + + return loaders, nil +} + +func (st *ServerType) parseTLSConnPolicy(d *caddyfile.Dispenser) (*caddytls.ConnectionPolicy, error) { + cp := new(caddytls.ConnectionPolicy) + + for d.Next() { + for d.NextBlock() { + switch d.Val() { + case "protocols": + args := d.RemainingArgs() + if len(args) == 0 { + return nil, d.SyntaxErr("one or two protocols") + } + if len(args) > 0 { + if _, ok := caddytls.SupportedProtocols[args[0]]; !ok { + return nil, d.Errf("Wrong protocol name or protocol not supported: '%s'", args[0]) + } + cp.ProtocolMin = args[0] + } + if len(args) > 1 { + if _, ok := caddytls.SupportedProtocols[args[1]]; !ok { + return nil, d.Errf("Wrong protocol name or protocol not supported: '%s'", args[1]) + } + cp.ProtocolMax = args[1] + } + case "ciphers": + for d.NextArg() { + if _, ok := caddytls.SupportedCipherSuites[d.Val()]; !ok { + return nil, d.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", d.Val()) + } + cp.CipherSuites = append(cp.CipherSuites, d.Val()) + } + case "curves": + for d.NextArg() { + if _, ok := caddytls.SupportedCurves[d.Val()]; !ok { + return nil, d.Errf("Wrong curve name or curve not supported: '%s'", d.Val()) + } + cp.Curves = append(cp.Curves, d.Val()) + } + case "alpn": + args := d.RemainingArgs() + if len(args) == 0 { + return nil, d.ArgErr() + } + cp.ALPN = args + } + } + } + + return cp, nil +} diff --git a/caddyconfig/httpcaddyfile/handlers.go b/caddyconfig/httpcaddyfile/handlers.go new file mode 100644 index 0000000..a90aa4a --- /dev/null +++ b/caddyconfig/httpcaddyfile/handlers.go @@ -0,0 +1,92 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httpcaddyfile + +import ( + "encoding/json" + "fmt" + "log" + + "github.com/caddyserver/caddy/v2" + "github.com/caddyserver/caddy/v2/caddyconfig" + "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" + "github.com/caddyserver/caddy/v2/modules/caddyhttp" +) + +func (st *ServerType) parseMatcherDefinitions(d *caddyfile.Dispenser) (map[string]map[string]json.RawMessage, error) { + matchers := make(map[string]map[string]json.RawMessage) + for d.Next() { + definitionName := d.Val() + for d.NextBlock() { + matcherName := d.Val() + mod, err := caddy.GetModule("http.matchers." 
+ matcherName) + if err != nil { + return nil, fmt.Errorf("getting matcher module '%s': %v", matcherName, err) + } + unm, ok := mod.New().(caddyfile.Unmarshaler) + if !ok { + return nil, fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName) + } + err = unm.UnmarshalCaddyfile(d.NewFromNextTokens()) + if err != nil { + return nil, err + } + rm, ok := unm.(caddyhttp.RequestMatcher) + if !ok { + return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName) + } + if _, ok := matchers[definitionName]; !ok { + matchers[definitionName] = make(map[string]json.RawMessage) + } + matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil) + } + } + return matchers, nil +} + +// directiveBuckets returns a list of middleware/handler directives. +// Buckets are ordered, and directives should be evaluated in their +// bucket order. Within a bucket, directives are not ordered. Hence, +// the return value has a slice of buckets, where each bucket is a +// map, which is a strongly-typed reminder that directives within a +// bucket are not ordered. +func directiveBuckets() []map[string]struct{} { + directiveBuckets := []map[string]struct{}{ + // prefer odd-numbered buckets; evens are there for contingencies + {}, // 0 + {}, // 1 - keep empty unless necessary + {}, // 2 + {}, // 3 - first handlers, last responders + {}, // 4 + {}, // 5 - middle of chain + {}, // 6 + {}, // 7 - last handlers, first responders + {}, // 8 + {}, // 9 - keep empty unless necessary + {}, // 10 + } + for _, mod := range caddy.GetModules("http.handlers") { + if hd, ok := mod.New().(HandlerDirective); ok { + bucket := hd.Bucket() + if bucket < 0 || bucket >= len(directiveBuckets) { + log.Printf("[ERROR] directive %s: bucket out of range [0-%d): %d; skipping", + mod.Name, len(directiveBuckets), bucket) + continue + } + directiveBuckets[bucket][mod.ID()] = struct{}{} + } + } + return directiveBuckets +} diff --git a/caddyconfig/httpcaddyfile/httptype.go b/caddyconfig/httpcaddyfile/httptype.go new file mode 100644 index 0000000..e5bf048 --- /dev/null +++ b/caddyconfig/httpcaddyfile/httptype.go @@ -0,0 +1,542 @@ +// Copyright 2015 Matthew Holt and The Caddy Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package httpcaddyfile + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/mholt/certmagic" + + "github.com/caddyserver/caddy/v2" + "github.com/caddyserver/caddy/v2/caddyconfig" + "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile" + "github.com/caddyserver/caddy/v2/modules/caddyhttp" + "github.com/caddyserver/caddy/v2/modules/caddytls" +) + +func init() { + caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}}) +} + +// ServerType can set up a config from an HTTP Caddyfile. +type ServerType struct { +} + +// ValidDirectives returns the list of known directives. 
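+// That is, a handful of built-in directives plus one directive per registered
+// HTTP handler module that implements HandlerDirective.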
+func (ServerType) ValidDirectives() []string { + dirs := []string{"matcher", "root", "tls", "redir"} // TODO: put special-case (hard-coded, or non-handler) directives here + for _, mod := range caddy.GetModules("http.handlers") { + if _, ok := mod.New().(HandlerDirective); ok { + dirs = append(dirs, mod.ID()) + } + } + return dirs +} + +// Setup makes a config from the tokens. +func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock, + options map[string]string) (*caddy.Config, []caddyconfig.Warning, error) { + var warnings []caddyconfig.Warning + + // map + sbmap, err := st.mapAddressToServerBlocks(originalServerBlocks) + if err != nil { + return nil, warnings, err + } + + // reduce + pairings := st.consolidateAddrMappings(sbmap) + + // each pairing of listener addresses to list of server + // blocks is basically a server definition + servers, err := st.serversFromPairings(pairings, &warnings) + if err != nil { + return nil, warnings, err + } + + // now that each server is configured, make the HTTP app + httpApp := caddyhttp.App{ + HTTPPort: tryInt(options["http-port"], &warnings), + HTTPSPort: tryInt(options["https-port"], &warnings), + Servers: servers, + } + + // now for the TLS app! (TODO: refactor into own func) + tlsApp := caddytls.TLS{Certificates: make(map[string]json.RawMessage)} + for _, p := range pairings { + for _, sblock := range p.serverBlocks { + if tkns, ok := sblock.Tokens["tls"]; ok { + // extract all unique hostnames from the server block + // keys, then convert to a slice for use in the TLS app + hostMap := make(map[string]struct{}) + for _, sblockKey := range sblock.Keys { + addr, err := standardizeAddress(sblockKey) + if err != nil { + return nil, warnings, fmt.Errorf("parsing server block key: %v", err) + } + hostMap[addr.Host] = struct{}{} + } + sblockHosts := make([]string, 0, len(hostMap)) + for host := range hostMap { + sblockHosts = append(sblockHosts, host) + } + + // parse tokens to get ACME manager config + acmeMgr, err := st.parseTLSAutomationManager(caddyfile.NewDispenser("Caddyfile", tkns)) + if err != nil { + return nil, warnings, err + } + + tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, caddytls.AutomationPolicy{ + Hosts: sblockHosts, + ManagementRaw: caddyconfig.JSONModuleObject(acmeMgr, "module", "acme", &warnings), + }) + + // parse tokens to get certificates to be loaded manually + certLoaders, err := st.parseTLSCerts(caddyfile.NewDispenser("Caddyfile", tkns)) + if err != nil { + return nil, nil, err + } + for loaderName, loader := range certLoaders { + tlsApp.Certificates[loaderName] = caddyconfig.JSON(loader, &warnings) + } + + } + } + } + + // annnd the top-level config, then we're done! + cfg := &caddy.Config{AppsRaw: make(map[string]json.RawMessage)} + if !reflect.DeepEqual(httpApp, caddyhttp.App{}) { + cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings) + } + if !reflect.DeepEqual(tlsApp, caddytls.TLS{}) { + cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings) + } + + return cfg, warnings, nil +} + +// hostsFromServerBlockKeys returns a list of all the +// hostnames found in the keys of the server block sb. +// The list may not be in a consistent order. 
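+// Duplicate hostnames are only returned once.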
+func (st *ServerType) hostsFromServerBlockKeys(sb caddyfile.ServerBlock) ([]string, error) { + // first get each unique hostname + hostMap := make(map[string]struct{}) + for _, sblockKey := range sb.Keys { + addr, err := standardizeAddress(sblockKey) + if err != nil { + return nil, fmt.Errorf("parsing server block key: %v", err) + } + hostMap[addr.Host] = struct{}{} + } + + // convert map to slice + sblockHosts := make([]string, 0, len(hostMap)) + for host := range hostMap { + sblockHosts = append(sblockHosts, host) + } + + return sblockHosts, nil +} + +// serversFromPairings creates the servers for each pairing of addresses +// to server blocks. Each pairing is essentially a server definition. +func (st *ServerType) serversFromPairings(pairings []sbAddrAssociation, warnings *[]caddyconfig.Warning) (map[string]*caddyhttp.Server, error) { + servers := make(map[string]*caddyhttp.Server) + + for i, p := range pairings { + srv := &caddyhttp.Server{ + Listen: p.addresses, + } + + for _, sblock := range p.serverBlocks { + matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock) + if err != nil { + return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.Keys, err) + } + + // extract matcher definitions + d := caddyfile.NewDispenser("Caddyfile", sblock.Tokens["matcher"]) + matcherDefs, err := st.parseMatcherDefinitions(d) + if err != nil { + return nil, err + } + + siteVarSubroute, handlerSubroute := new(caddyhttp.Subroute), new(caddyhttp.Subroute) + + // built-in directives + + // root: path to root of site + if tkns, ok := sblock.Tokens["root"]; ok { + routes, err := st.parseRoot(tkns, matcherDefs, warnings) + if err != nil { + return nil, err + } + siteVarSubroute.Routes = append(siteVarSubroute.Routes, routes...) + } + + // tls: off and conn policies + if tkns, ok := sblock.Tokens["tls"]; ok { + // get the hosts for this server block... + hosts, err := st.hostsFromServerBlockKeys(sblock) + if err != nil { + return nil, err + } + + // ...and of those, which ones qualify for auto HTTPS + var autoHTTPSQualifiedHosts []string + for _, h := range hosts { + if certmagic.HostQualifies(h) { + autoHTTPSQualifiedHosts = append(autoHTTPSQualifiedHosts, h) + } + } + + if len(tkns) == 2 && tkns[1].Text == "off" { + // tls off: disable TLS (and automatic HTTPS) for server block's names + if srv.AutoHTTPS == nil { + srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig) + } + srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, autoHTTPSQualifiedHosts...) + } else { + // tls connection policies + cp, err := st.parseTLSConnPolicy(caddyfile.NewDispenser("Caddyfile", tkns)) + if err != nil { + return nil, err + } + // TODO: are matchers needed if every hostname of the config is matched? + cp.Matchers = map[string]json.RawMessage{ + "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones + } + srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp) + } + } + + // set up each handler directive + for _, dirBucket := range directiveBuckets() { + for dir := range dirBucket { + // keep in mind that multiple occurrences of the directive may appear here + tkns, ok := sblock.Tokens[dir] + if !ok { + continue + } + + // extract matcher sets from matcher tokens, if any + matcherSetsMap, err := st.tokensToMatcherSets(tkns, matcherDefs, warnings) + + mod, err := caddy.GetModule("http.handlers." 
+ dir) + if err != nil { + return nil, fmt.Errorf("getting handler module '%s': %v", mod.Name, err) + } + + // the tokens have been divided by matcher set for us, + // so iterate each one and set them up + for _, mst := range matcherSetsMap { + unm, ok := mod.New().(caddyfile.Unmarshaler) + if !ok { + return nil, fmt.Errorf("handler module '%s' is not a Caddyfile unmarshaler", mod.Name) + } + err = unm.UnmarshalCaddyfile(caddyfile.NewDispenser(d.File(), mst.tokens)) + if err != nil { + return nil, err + } + handler, ok := unm.(caddyhttp.MiddlewareHandler) + if !ok { + return nil, fmt.Errorf("handler module '%s' does not implement caddyhttp.MiddlewareHandler interface", mod.Name) + } + + route := caddyhttp.Route{ + Handle: []json.RawMessage{ + caddyconfig.JSONModuleObject(handler, "handler", dir, warnings), + }, + } + if mst.matcherSet != nil { + route.MatcherSets = []map[string]json.RawMessage{mst.matcherSet} + } + handlerSubroute.Routes = append(handlerSubroute.Routes, route) + } + + } + } + + // redir: static responses that redirect + if tkns, ok := sblock.Tokens["redir"]; ok { + routes, err := st.parseRedir(tkns, matcherDefs, warnings) + if err != nil { + return nil, err + } + handlerSubroute.Routes = append(handlerSubroute.Routes, routes...) + } + + // the route that contains the site's handlers will + // be assumed to be the sub-route for this site... + siteSubroute := handlerSubroute + + // ... unless, of course, there are variables that might + // be used by the site's matchers or handlers, in which + // case we need to nest the handlers in a sub-sub-route, + // and the variables go in the sub-route so the variables + // get evaluated first + if len(siteVarSubroute.Routes) > 0 { + subSubRoute := caddyhttp.Subroute{Routes: siteSubroute.Routes} + siteSubroute.Routes = append( + siteVarSubroute.Routes, + caddyhttp.Route{ + Handle: []json.RawMessage{ + caddyconfig.JSONModuleObject(subSubRoute, "handler", "subroute", warnings), + }, + }, + ) + } + + siteSubroute.Routes = consolidateRoutes(siteSubroute.Routes) + + srv.Routes = append(srv.Routes, caddyhttp.Route{ + MatcherSets: matcherSetsEnc, + Handle: []json.RawMessage{ + caddyconfig.JSONModuleObject(siteSubroute, "handler", "subroute", warnings), + }, + }) + } + + srv.Routes = consolidateRoutes(srv.Routes) + + servers[fmt.Sprintf("srv%d", i)] = srv + } + + return servers, nil +} + +// consolidateRoutes combines routes with the same properties +// (same matchers, same Terminal and Group settings) for a +// cleaner overall output. +func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList { + for i := 0; i < len(routes)-1; i++ { + if reflect.DeepEqual(routes[i].MatcherSets, routes[i+1].MatcherSets) && + routes[i].Terminal == routes[i+1].Terminal && + routes[i].Group == routes[i+1].Group { + // keep the handlers in the same order, then splice out repetitive route + routes[i].Handle = append(routes[i].Handle, routes[i+1].Handle...) + routes = append(routes[:i+1], routes[i+2:]...) 
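+			// recheck this index on the next pass, since the following route was just merged into it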
+ i-- + } + } + return routes +} + +func (st *ServerType) tokensToMatcherSets( + tkns []caddyfile.Token, + matcherDefs map[string]map[string]json.RawMessage, + warnings *[]caddyconfig.Warning, +) (map[string]matcherSetAndTokens, error) { + m := make(map[string]matcherSetAndTokens) + + for len(tkns) > 0 { + d := caddyfile.NewDispenser("Caddyfile", tkns) + d.Next() // consume directive token + + // look for matcher; it should be the next argument + var matcherToken caddyfile.Token + var matcherSet map[string]json.RawMessage + if d.NextArg() { + var ok bool + var err error + matcherSet, ok, err = st.matcherSetFromMatcherToken(d.Token(), matcherDefs, warnings) + if err != nil { + return nil, err + } + if ok { + // found a matcher; save it, then splice it out + // since we don't want to parse it again + matcherToken = d.Token() + tkns = d.Delete() + } + d.RemainingArgs() // advance to end of line + } + for d.NextBlock() { + // skip entire block including any nested blocks; all + // we care about is accessing next directive occurrence + for d.Nested() { + d.NextBlock() + } + } + end := d.Cursor() + 1 + m[matcherToken.Text] = matcherSetAndTokens{ + matcherSet: matcherSet, + tokens: append(m[matcherToken.Text].tokens, tkns[:end]...), + } + tkns = tkns[end:] + } + return m, nil +} + +func (st *ServerType) matcherSetFromMatcherToken( + tkn caddyfile.Token, + matcherDefs map[string]map[string]json.RawMessage, + warnings *[]caddyconfig.Warning, +) (map[string]json.RawMessage, bool, error) { + // matcher tokens can be wildcards, simple path matchers, + // or refer to a pre-defined matcher by some name + if tkn.Text == "*" { + // match all requests == no matchers, so nothing to do + return nil, true, nil + } else if strings.HasPrefix(tkn.Text, "/") { + // convenient way to specify a single path match + return map[string]json.RawMessage{ + "path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings), + }, true, nil + } else if strings.HasPrefix(tkn.Text, "match:") { + // pre-defined matcher + matcherName := strings.TrimPrefix(tkn.Text, "match:") + m, ok := matcherDefs[matcherName] + if !ok { + return nil, false, fmt.Errorf("unrecognized matcher name: %+v", matcherName) + } + return m, true, nil + } + + return nil, false, nil +} + +func (st *ServerType) compileEncodedMatcherSets(sblock caddyfile.ServerBlock) ([]map[string]json.RawMessage, error) { + type hostPathPair struct { + hostm caddyhttp.MatchHost + pathm caddyhttp.MatchPath + } + + // keep routes with common host and path matchers together + var matcherPairs []*hostPathPair + + for _, key := range sblock.Keys { + addr, err := standardizeAddress(key) + if err != nil { + return nil, fmt.Errorf("server block %v: parsing and standardizing address '%s': %v", sblock.Keys, key, err) + } + + // choose a matcher pair that should be shared by this + // server block; if none exists yet, create one + var chosenMatcherPair *hostPathPair + for _, mp := range matcherPairs { + if (len(mp.pathm) == 0 && addr.Path == "") || + (len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) { + chosenMatcherPair = mp + break + } + } + if chosenMatcherPair == nil { + chosenMatcherPair = new(hostPathPair) + if addr.Path != "" { + chosenMatcherPair.pathm = []string{addr.Path} + } + matcherPairs = append(matcherPairs, chosenMatcherPair) + } + + // add this server block's keys to the matcher + // pair if it doesn't already exist + if addr.Host != "" { + var found bool + for _, h := range chosenMatcherPair.hostm { + if h == addr.Host { + found = true + break + } + } + if !found { + 
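				// this key's host is not yet in the pair's host matcher, so add it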
chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host) + } + } + } + + // iterate each pairing of host and path matchers and + // put them into a map for JSON encoding + var matcherSets []map[string]caddyhttp.RequestMatcher + for _, mp := range matcherPairs { + matcherSet := make(map[string]caddyhttp.RequestMatcher) + if len(mp.hostm) > 0 { + matcherSet["host"] = mp.hostm + } + if len(mp.pathm) > 0 { + matcherSet["path"] = mp.pathm + } + if len(matcherSet) > 0 { + matcherSets = append(matcherSets, matcherSet) + } + } + + // finally, encode each of the matcher sets + var matcherSetsEnc []map[string]json.RawMessage + for _, ms := range matcherSets { + msEncoded, err := encodeMatcherSet(ms) + if err != nil { + return nil, fmt.Errorf("server block %v: %v", sblock.Keys, err) + } + matcherSetsEnc = append(matcherSetsEnc, msEncoded) + } + + return matcherSetsEnc, nil +} + +func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (map[string]json.RawMessage, error) { + msEncoded := make(map[string]json.RawMessage) + for matcherName, val := range matchers { + jsonBytes, err := json.Marshal(val) + if err != nil { + return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err) + } + msEncoded[matcherName] = jsonBytes + } + return msEncoded, nil +} + +// HandlerDirective implements a directive for an HTTP handler, +// in that it can unmarshal its own configuration from Caddyfile +// tokens and also specify which directive bucket it belongs in. +type HandlerDirective interface { + caddyfile.Unmarshaler + Bucket() int +} + +// tryInt tries to convert str to an integer. If it fails, it downgrades +// the error to a warning and returns 0. +func tryInt(str string, warnings *[]caddyconfig.Warning) int { + if str == "" { + return 0 + } + val, err := strconv.Atoi(str) + if err != nil && warnings != nil { + *warnings = append(*warnings, caddyconfig.Warning{Message: err.Error()}) + } + return val +} + +type matcherSetAndTokens struct { + matcherSet map[string]json.RawMessage + tokens []caddyfile.Token +} + +// sbAddrAssocation is a mapping from a list of +// addresses to a list of server blocks that are +// served on those addresses. +type sbAddrAssociation struct { + addresses []string + serverBlocks []caddyfile.ServerBlock +} + +// Interface guard +var _ caddyfile.ServerType = (*ServerType)(nil) |
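Usage note (not part of this patch): once an adapter is registered, callers can look it up by name and convert a Caddyfile to Caddy JSON through the Adapter interface defined in configadapters.go. The following is a minimal, hypothetical sketch; the input file name and the empty options map are illustrative assumptions, while RegisterAdapter, GetAdapter, Adapt, and the Warning fields come from this changeset.

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/caddyserver/caddy/v2/caddyconfig"
	// blank-import the HTTP Caddyfile server type so its init() registers the "caddyfile" adapter
	_ "github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
)

func main() {
	// read the Caddyfile to adapt (path is an assumption for illustration)
	body, err := ioutil.ReadFile("Caddyfile")
	if err != nil {
		log.Fatal(err)
	}

	// look up the adapter registered under the name "caddyfile"
	adapter := caddyconfig.GetAdapter("caddyfile")
	if adapter == nil {
		log.Fatal("caddyfile adapter is not registered")
	}

	// adapt the Caddyfile body to Caddy JSON; option keys are adapter-specific
	cfgJSON, warnings, err := adapter.Adapt(body, map[string]string{})
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range warnings {
		log.Printf("[WARNING] %s (line %d): %s", w.File, w.Line, w.Message)
	}

	fmt.Println(string(cfgJSON))
}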