Diffstat (limited to 'caddyconfig')
-rw-r--r--   caddyconfig/caddyfile/adapter.go                   87
-rwxr-xr-x   caddyconfig/caddyfile/dispenser.go                341
-rwxr-xr-x   caddyconfig/caddyfile/dispenser_test.go           316
-rwxr-xr-x   caddyconfig/caddyfile/lexer.go                    150
-rwxr-xr-x   caddyconfig/caddyfile/lexer_test.go               196
-rwxr-xr-x   caddyconfig/caddyfile/parse.go                    521
-rwxr-xr-x   caddyconfig/caddyfile/parse_test.go               681
-rwxr-xr-x   caddyconfig/caddyfile/testdata/import_glob0.txt     6
-rwxr-xr-x   caddyconfig/caddyfile/testdata/import_glob1.txt     4
-rwxr-xr-x   caddyconfig/caddyfile/testdata/import_glob2.txt     3
-rwxr-xr-x   caddyconfig/caddyfile/testdata/import_test1.txt     2
-rwxr-xr-x   caddyconfig/caddyfile/testdata/import_test2.txt     4
-rw-r--r--   caddyconfig/configadapters.go                     113
-rw-r--r--   caddyconfig/httpcaddyfile/addresses.go            332
-rw-r--r--   caddyconfig/httpcaddyfile/addresses_test.go       166
-rw-r--r--   caddyconfig/httpcaddyfile/builtins.go             255
-rw-r--r--   caddyconfig/httpcaddyfile/directives.go           182
-rw-r--r--   caddyconfig/httpcaddyfile/handlers.go              56
-rw-r--r--   caddyconfig/httpcaddyfile/httptype.go             519
19 files changed, 3934 insertions, 0 deletions
diff --git a/caddyconfig/caddyfile/adapter.go b/caddyconfig/caddyfile/adapter.go
new file mode 100644
index 0000000..377f77b
--- /dev/null
+++ b/caddyconfig/caddyfile/adapter.go
@@ -0,0 +1,87 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+)
+
+// Adapter adapts Caddyfile to Caddy JSON.
+type Adapter struct {
+ ServerType ServerType
+}
+
+// Adapt converts the Caddyfile config in body to Caddy JSON.
+func (a Adapter) Adapt(body []byte, options map[string]string) ([]byte, []caddyconfig.Warning, error) {
+ if a.ServerType == nil {
+ return nil, nil, fmt.Errorf("no server type")
+ }
+ if options == nil {
+ options = make(map[string]string)
+ }
+
+ filename := options["filename"]
+ if filename == "" {
+ filename = "Caddyfile"
+ }
+
+ serverBlocks, err := Parse(filename, bytes.NewReader(body))
+ if err != nil {
+ return nil, nil, err
+ }
+
+ cfg, warnings, err := a.ServerType.Setup(serverBlocks, options)
+ if err != nil {
+ return nil, warnings, err
+ }
+
+ marshalFunc := json.Marshal
+ if options["pretty"] == "true" {
+ marshalFunc = caddyconfig.JSONIndent
+ }
+ result, err := marshalFunc(cfg)
+
+ return result, warnings, err
+}
+
+// Unmarshaler is a type that can unmarshal
+// Caddyfile tokens to set itself up for a
+// JSON encoding. The goal of an unmarshaler
+// is not to set itself up for actual use,
+// but to set itself up for being marshaled
+// into JSON. Caddyfile-unmarshaled values
+// will not be used directly; they will be
+// encoded as JSON and then used from that.
+type Unmarshaler interface {
+ UnmarshalCaddyfile(d *Dispenser) error
+}
+
+// ServerType is a type that can evaluate a Caddyfile and set up a caddy config.
+type ServerType interface {
+ // Setup takes the server blocks which
+ // contain tokens, as well as options
+ // (e.g. CLI flags) and creates a Caddy
+ // config, along with any warnings or
+ // an error.
+ Setup([]ServerBlock, map[string]string) (*caddy.Config, []caddyconfig.Warning, error)
+}
+
+// Interface guard
+var _ caddyconfig.Adapter = (*Adapter)(nil)
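
A minimal usage sketch of the adapter above. Hedged: the httpcaddyfile.ServerType value and the sample Caddyfile are assumptions for illustration; only Adapt's signature and the "filename"/"pretty" options come from this file.

	package main

	import (
		"fmt"
		"log"

		"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
		"github.com/caddyserver/caddy/v2/caddyconfig/httpcaddyfile"
	)

	func main() {
		// ServerType is assumed to be the HTTP server type added
		// elsewhere in this commit (caddyconfig/httpcaddyfile).
		adapter := caddyfile.Adapter{ServerType: httpcaddyfile.ServerType{}}

		cfgJSON, warnings, err := adapter.Adapt([]byte("localhost:2015\n"), map[string]string{
			"filename": "Caddyfile", // reported in parse errors
			"pretty":   "true",      // indent the emitted JSON
		})
		if err != nil {
			log.Fatal(err)
		}
		for _, w := range warnings {
			log.Printf("adapt warning: %v", w)
		}
		fmt.Println(string(cfgJSON))
	}
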
diff --git a/caddyconfig/caddyfile/dispenser.go b/caddyconfig/caddyfile/dispenser.go
new file mode 100755
index 0000000..0d2c789
--- /dev/null
+++ b/caddyconfig/caddyfile/dispenser.go
@@ -0,0 +1,341 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// Dispenser is a type that dispenses tokens, similarly to a lexer,
+// except that it can do so with some notion of structure. An empty
+// Dispenser is invalid; call NewDispenser to make a proper instance.
+type Dispenser struct {
+ filename string
+ tokens []Token
+ cursor int
+ nesting int
+}
+
+// NewDispenser returns a Dispenser filled with the given tokens.
+// TODO: Get rid of the filename argument; it seems pointless here
+func NewDispenser(filename string, tokens []Token) *Dispenser {
+ return &Dispenser{
+ filename: filename,
+ tokens: tokens,
+ cursor: -1,
+ }
+}
+
+// Next loads the next token. Returns true if a token
+// was loaded; false otherwise. If false, all tokens
+// have been consumed.
+func (d *Dispenser) Next() bool {
+ if d.cursor < len(d.tokens)-1 {
+ d.cursor++
+ return true
+ }
+ return false
+}
+
+// Prev moves to the previous token. It does the inverse
+// of Next(), except this function may decrement the cursor
+// to -1 so that the next call to Next() points to the
+// first token; this allows dispensing to "start over". This
+// method returns true if the cursor ends up pointing to a
+// valid token.
+func (d *Dispenser) Prev() bool {
+ if d.cursor > -1 {
+ d.cursor--
+ return d.cursor > -1
+ }
+ return false
+}
+
+// NextArg loads the next token if it is on the same
+// line and if it is not a block opening (open curly
+// brace). Returns true if an argument token was
+// loaded; false otherwise. If false, all tokens on
+// the line have been consumed except for potentially
+// a block opening. It handles imported tokens
+// correctly.
+func (d *Dispenser) NextArg() bool {
+ if !d.nextOnSameLine() {
+ return false
+ }
+ if d.Val() == "{" {
+ // roll back; a block opening is not an argument
+ d.cursor--
+ return false
+ }
+ return true
+}
+
+// nextOnSameLine advances the cursor if the next
+// token is on the same line of the same file.
+func (d *Dispenser) nextOnSameLine() bool {
+ if d.cursor < 0 {
+ d.cursor++
+ return true
+ }
+ if d.cursor >= len(d.tokens) {
+ return false
+ }
+ if d.cursor < len(d.tokens)-1 &&
+ d.tokens[d.cursor].File == d.tokens[d.cursor+1].File &&
+ d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) == d.tokens[d.cursor+1].Line {
+ d.cursor++
+ return true
+ }
+ return false
+}
+
+// NextLine loads the next token only if it is not on the same
+// line as the current token, and returns true if a token was
+// loaded; false otherwise. If false, there is not another token
+// or it is on the same line. It handles imported tokens correctly.
+func (d *Dispenser) NextLine() bool {
+ if d.cursor < 0 {
+ d.cursor++
+ return true
+ }
+ if d.cursor >= len(d.tokens) {
+ return false
+ }
+ if d.cursor < len(d.tokens)-1 &&
+ (d.tokens[d.cursor].File != d.tokens[d.cursor+1].File ||
+ d.tokens[d.cursor].Line+d.numLineBreaks(d.cursor) < d.tokens[d.cursor+1].Line) {
+ d.cursor++
+ return true
+ }
+ return false
+}
+
+// NextBlock can be used as the condition of a for loop
+// to load the next token as long as it opens a block or
+// is already in a block. It returns true if a token was
+// loaded, or false when the block's closing curly brace
+// was loaded and thus the block ended. Nested blocks are
+// not supported.
+func (d *Dispenser) NextBlock() bool {
+ if d.nesting > 0 {
+ d.Next()
+ if d.Val() == "}" {
+ d.nesting--
+ return false
+ }
+ return true
+ }
+ if !d.nextOnSameLine() { // block must open on same line
+ return false
+ }
+ if d.Val() != "{" {
+ d.cursor-- // roll back if not opening brace
+ return false
+ }
+ d.Next()
+ if d.Val() == "}" {
+ // open and then closed right away
+ return false
+ }
+ d.nesting++
+ return true
+}
+
+// Nested returns true if the current token is nested inside
+// a block (i.e. an open curly brace was consumed).
+func (d *Dispenser) Nested() bool {
+ return d.nesting > 0
+}
+
+// Val gets the text of the current token. If there is no token
+// loaded, it returns empty string.
+func (d *Dispenser) Val() string {
+ if d.cursor < 0 || d.cursor >= len(d.tokens) {
+ return ""
+ }
+ return d.tokens[d.cursor].Text
+}
+
+// Line gets the line number of the current token. If there is no token
+// loaded, it returns 0.
+func (d *Dispenser) Line() int {
+ if d.cursor < 0 || d.cursor >= len(d.tokens) {
+ return 0
+ }
+ return d.tokens[d.cursor].Line
+}
+
+// File gets the filename of the current token. If there is no token loaded,
+// it returns the filename originally given when parsing started.
+func (d *Dispenser) File() string {
+ if d.cursor < 0 || d.cursor >= len(d.tokens) {
+ return d.filename
+ }
+ if tokenFilename := d.tokens[d.cursor].File; tokenFilename != "" {
+ return tokenFilename
+ }
+ return d.filename
+}
+
+// Args is a convenience function that loads the next arguments
+// (tokens on the same line) into an arbitrary number of strings
+// pointed to in targets. If there are fewer tokens available
+// than string pointers, the remaining strings will not be changed
+// and false will be returned. If there were enough tokens available
+// to fill the arguments, then true will be returned.
+func (d *Dispenser) Args(targets ...*string) bool {
+ for i := 0; i < len(targets); i++ {
+ if !d.NextArg() {
+ return false
+ }
+ *targets[i] = d.Val()
+ }
+ return true
+}
+
+// RemainingArgs loads any more arguments (tokens on the same line)
+// into a slice and returns them. Open curly brace tokens also indicate
+// the end of arguments, and the curly brace is not included in
+// the return value nor is it loaded.
+func (d *Dispenser) RemainingArgs() []string {
+ var args []string
+ for d.NextArg() {
+ args = append(args, d.Val())
+ }
+ return args
+}
+
+// NewFromNextTokens returns a new dispenser with a copy of
+// the tokens from the current token until the end of the
+// "directive" whether that be to the end of the line or
+// the end of a block that starts at the end of the line.
+func (d *Dispenser) NewFromNextTokens() *Dispenser {
+ tkns := []Token{d.Token()}
+ for d.NextArg() {
+ tkns = append(tkns, d.Token())
+ }
+ if d.Next() && d.Val() == "{" {
+ tkns = append(tkns, d.Token())
+ for d.NextBlock() {
+ for d.Nested() {
+ tkns = append(tkns, d.Token())
+ d.NextBlock()
+ }
+ }
+ tkns = append(tkns, d.Token())
+ } else {
+ d.cursor--
+ }
+ return NewDispenser(d.filename, tkns)
+}
+
+// Token returns the current token.
+func (d *Dispenser) Token() Token {
+ return d.TokenAt(d.cursor)
+}
+
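+// TokenAt returns the token at the given index without
+// changing the dispenser's cursor.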
+func (d *Dispenser) TokenAt(cursor int) Token {
+ if cursor < 0 || cursor >= len(d.tokens) {
+ return Token{}
+ }
+ return d.tokens[cursor]
+}
+
+// Cursor returns the current cursor (token index).
+func (d *Dispenser) Cursor() int {
+ return d.cursor
+}
+
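+// Reset moves the cursor back to the beginning (before the
+// first token), as if no tokens had been dispensed yet.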
+func (d *Dispenser) Reset() {
+ d.cursor = -1
+}
+
+// ArgErr returns an argument error, meaning that another
+// argument was expected but not found. In other words,
+// a line break or open curly brace was encountered instead of
+// an argument.
+func (d *Dispenser) ArgErr() error {
+ if d.Val() == "{" {
+ return d.Err("Unexpected token '{', expecting argument")
+ }
+ return d.Errf("Wrong argument count or unexpected line ending after '%s'", d.Val())
+}
+
+// SyntaxErr creates a generic syntax error which explains what was
+// found and what was expected.
+func (d *Dispenser) SyntaxErr(expected string) error {
+ msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected)
+ return errors.New(msg)
+}
+
+// EOFErr returns an error indicating that the dispenser reached
+// the end of the input when searching for the next token.
+func (d *Dispenser) EOFErr() error {
+ return d.Errf("Unexpected EOF")
+}
+
+// Err generates a custom parse-time error with a message of msg.
+func (d *Dispenser) Err(msg string) error {
+ msg = fmt.Sprintf("%s:%d - Error during parsing: %s", d.File(), d.Line(), msg)
+ return errors.New(msg)
+}
+
+// Errf is like Err, but for formatted error messages
+func (d *Dispenser) Errf(format string, args ...interface{}) error {
+ return d.Err(fmt.Sprintf(format, args...))
+}
+
+// Delete deletes the current token and returns the updated slice
+// of tokens. The cursor is not advanced to the next token.
+// Because deletion modifies the underlying slice, this method
+// should only be called if you have access to the original slice
+// of tokens and/or are using the slice of tokens outside this
+// Dispenser instance. If you do not re-assign the slice with the
+// return value of this method, inconsistencies in the token
+// array will become apparent (or worse, hide from you like they
+// did me for 3 and a half freaking hours late one night).
+func (d *Dispenser) Delete() []Token {
+ if d.cursor >= 0 && d.cursor < len(d.tokens)-1 {
+ d.tokens = append(d.tokens[:d.cursor], d.tokens[d.cursor+1:]...)
+ d.cursor--
+ }
+ return d.tokens
+}
+
+// numLineBreaks counts how many line breaks are in the token
+// value given by the token index tknIdx. It returns 0 if the
+// token does not exist or there are no line breaks.
+func (d *Dispenser) numLineBreaks(tknIdx int) int {
+ if tknIdx < 0 || tknIdx >= len(d.tokens) {
+ return 0
+ }
+ return strings.Count(d.tokens[tknIdx].Text, "\n")
+}
+
+// isNewLine determines whether the current token is on a different
+// line (higher line number) than the previous token. It handles imported
+// tokens correctly. If there isn't a previous token, it returns true.
+func (d *Dispenser) isNewLine() bool {
+ if d.cursor < 1 {
+ return true
+ }
+ if d.cursor > len(d.tokens)-1 {
+ return false
+ }
+ return d.tokens[d.cursor-1].File != d.tokens[d.cursor].File ||
+ d.tokens[d.cursor-1].Line+d.numLineBreaks(d.cursor-1) < d.tokens[d.cursor].Line
+}
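
To show how a Dispenser is meant to be driven, here is a hedged sketch of an Unmarshaler (see adapter.go) for a hypothetical "gadget" directive; the directive name, fields, and syntax are invented for illustration:

	package mymodule

	import "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"

	// Gadget is a hypothetical type configured by Caddyfile syntax like:
	//
	//     gadget /some/path {
	//         timeout 5s
	//     }
	type Gadget struct {
		Path    string
		Timeout string
	}

	// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
	func (g *Gadget) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
		for d.Next() { // consume the directive name
			if !d.Args(&g.Path) {
				return d.ArgErr() // required argument missing
			}
			for d.NextBlock() { // walk the optional block, one level deep
				switch d.Val() {
				case "timeout":
					if !d.Args(&g.Timeout) {
						return d.ArgErr()
					}
				default:
					return d.Errf("unrecognized subdirective '%s'", d.Val())
				}
			}
		}
		return nil
	}
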
diff --git a/caddyconfig/caddyfile/dispenser_test.go b/caddyconfig/caddyfile/dispenser_test.go
new file mode 100755
index 0000000..9860bed
--- /dev/null
+++ b/caddyconfig/caddyfile/dispenser_test.go
@@ -0,0 +1,316 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "io"
+ "log"
+ "reflect"
+ "strings"
+ "testing"
+)
+
+func TestDispenser_Val_Next(t *testing.T) {
+ input := `host:port
+ dir1 arg1
+ dir2 arg2 arg3
+ dir3`
+ d := newTestDispenser(input)
+
+ if val := d.Val(); val != "" {
+ t.Fatalf("Val(): Should return empty string when no token loaded; got '%s'", val)
+ }
+
+ assertNext := func(shouldLoad bool, expectedCursor int, expectedVal string) {
+ if loaded := d.Next(); loaded != shouldLoad {
+ t.Errorf("Next(): Expected %v but got %v instead (val '%s')", shouldLoad, loaded, d.Val())
+ }
+ if d.cursor != expectedCursor {
+ t.Errorf("Expected cursor to be %d, but was %d", expectedCursor, d.cursor)
+ }
+ if d.nesting != 0 {
+ t.Errorf("Nesting should be 0, was %d instead", d.nesting)
+ }
+ if val := d.Val(); val != expectedVal {
+ t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
+ }
+ }
+
+ assertNext(true, 0, "host:port")
+ assertNext(true, 1, "dir1")
+ assertNext(true, 2, "arg1")
+ assertNext(true, 3, "dir2")
+ assertNext(true, 4, "arg2")
+ assertNext(true, 5, "arg3")
+ assertNext(true, 6, "dir3")
+ // Note: This next test simply asserts existing behavior.
+ // If desired, we may wish to empty the token value after
+ // reading past the EOF. Open an issue if you want this change.
+ assertNext(false, 6, "dir3")
+}
+
+func TestDispenser_NextArg(t *testing.T) {
+ input := `dir1 arg1
+ dir2 arg2 arg3
+ dir3`
+ d := newTestDispenser(input)
+
+ assertNext := func(shouldLoad bool, expectedVal string, expectedCursor int) {
+ if d.Next() != shouldLoad {
+ t.Errorf("Next(): Should load token but got false instead (val: '%s')", d.Val())
+ }
+ if d.cursor != expectedCursor {
+ t.Errorf("Next(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor)
+ }
+ if val := d.Val(); val != expectedVal {
+ t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
+ }
+ }
+
+ assertNextArg := func(expectedVal string, loadAnother bool, expectedCursor int) {
+ if !d.NextArg() {
+ t.Error("NextArg(): Should load next argument but got false instead")
+ }
+ if d.cursor != expectedCursor {
+ t.Errorf("NextArg(): Expected cursor to be at %d, but it was %d", expectedCursor, d.cursor)
+ }
+ if val := d.Val(); val != expectedVal {
+ t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
+ }
+ if !loadAnother {
+ if d.NextArg() {
+ t.Fatalf("NextArg(): Should NOT load another argument, but got true instead (val: '%s')", d.Val())
+ }
+ if d.cursor != expectedCursor {
+ t.Errorf("NextArg(): Expected cursor to remain at %d, but it was %d", expectedCursor, d.cursor)
+ }
+ }
+ }
+
+ assertNext(true, "dir1", 0)
+ assertNextArg("arg1", false, 1)
+ assertNext(true, "dir2", 2)
+ assertNextArg("arg2", true, 3)
+ assertNextArg("arg3", false, 4)
+ assertNext(true, "dir3", 5)
+ assertNext(false, "dir3", 5)
+}
+
+func TestDispenser_NextLine(t *testing.T) {
+ input := `host:port
+ dir1 arg1
+ dir2 arg2 arg3`
+ d := newTestDispenser(input)
+
+ assertNextLine := func(shouldLoad bool, expectedVal string, expectedCursor int) {
+ if d.NextLine() != shouldLoad {
+ t.Errorf("NextLine(): Should load token but got false instead (val: '%s')", d.Val())
+ }
+ if d.cursor != expectedCursor {
+ t.Errorf("NextLine(): Expected cursor to be %d, instead was %d", expectedCursor, d.cursor)
+ }
+ if val := d.Val(); val != expectedVal {
+ t.Errorf("Val(): Expected '%s' but got '%s'", expectedVal, val)
+ }
+ }
+
+ assertNextLine(true, "host:port", 0)
+ assertNextLine(true, "dir1", 1)
+ assertNextLine(false, "dir1", 1)
+ d.Next() // arg1
+ assertNextLine(true, "dir2", 3)
+ assertNextLine(false, "dir2", 3)
+ d.Next() // arg2
+ assertNextLine(false, "arg2", 4)
+ d.Next() // arg3
+ assertNextLine(false, "arg3", 5)
+}
+
+func TestDispenser_NextBlock(t *testing.T) {
+ input := `foobar1 {
+ sub1 arg1
+ sub2
+ }
+ foobar2 {
+ }`
+ d := newTestDispenser(input)
+
+ assertNextBlock := func(shouldLoad bool, expectedCursor, expectedNesting int) {
+ if loaded := d.NextBlock(); loaded != shouldLoad {
+ t.Errorf("NextBlock(): Should return %v but got %v", shouldLoad, loaded)
+ }
+ if d.cursor != expectedCursor {
+ t.Errorf("NextBlock(): Expected cursor to be %d, was %d", expectedCursor, d.cursor)
+ }
+ if d.nesting != expectedNesting {
+ t.Errorf("NextBlock(): Nesting should be %d, not %d", expectedNesting, d.nesting)
+ }
+ }
+
+ assertNextBlock(false, -1, 0)
+ d.Next() // foobar1
+ assertNextBlock(true, 2, 1)
+ assertNextBlock(true, 3, 1)
+ assertNextBlock(true, 4, 1)
+ assertNextBlock(false, 5, 0)
+ d.Next() // foobar2
+ assertNextBlock(false, 8, 0) // empty block is as if it didn't exist
+}
+
+func TestDispenser_Args(t *testing.T) {
+ var s1, s2, s3 string
+ input := `dir1 arg1 arg2 arg3
+ dir2 arg4 arg5
+ dir3 arg6 arg7
+ dir4`
+ d := newTestDispenser(input)
+
+ d.Next() // dir1
+
+ // As many strings as arguments
+ if all := d.Args(&s1, &s2, &s3); !all {
+ t.Error("Args(): Expected true, got false")
+ }
+ if s1 != "arg1" {
+ t.Errorf("Args(): Expected s1 to be 'arg1', got '%s'", s1)
+ }
+ if s2 != "arg2" {
+ t.Errorf("Args(): Expected s2 to be 'arg2', got '%s'", s2)
+ }
+ if s3 != "arg3" {
+ t.Errorf("Args(): Expected s3 to be 'arg3', got '%s'", s3)
+ }
+
+ d.Next() // dir2
+
+ // More strings than arguments
+ if all := d.Args(&s1, &s2, &s3); all {
+ t.Error("Args(): Expected false, got true")
+ }
+ if s1 != "arg4" {
+ t.Errorf("Args(): Expected s1 to be 'arg4', got '%s'", s1)
+ }
+ if s2 != "arg5" {
+ t.Errorf("Args(): Expected s2 to be 'arg5', got '%s'", s2)
+ }
+ if s3 != "arg3" {
+ t.Errorf("Args(): Expected s3 to be unchanged ('arg3'), instead got '%s'", s3)
+ }
+
+ // (quick cursor check just for kicks and giggles)
+ if d.cursor != 6 {
+ t.Errorf("Cursor should be 6, but is %d", d.cursor)
+ }
+
+ d.Next() // dir3
+
+ // More arguments than strings
+ if all := d.Args(&s1); !all {
+ t.Error("Args(): Expected true, got false")
+ }
+ if s1 != "arg6" {
+ t.Errorf("Args(): Expected s1 to be 'arg6', got '%s'", s1)
+ }
+
+ d.Next() // dir4
+
+ // No arguments or strings
+ if all := d.Args(); !all {
+ t.Error("Args(): Expected true, got false")
+ }
+
+ // No arguments but at least one string
+ if all := d.Args(&s1); all {
+ t.Error("Args(): Expected false, got true")
+ }
+}
+
+func TestDispenser_RemainingArgs(t *testing.T) {
+ input := `dir1 arg1 arg2 arg3
+ dir2 arg4 arg5
+ dir3 arg6 { arg7
+ dir4`
+ d := newTestDispenser(input)
+
+ d.Next() // dir1
+
+ args := d.RemainingArgs()
+ if expected := []string{"arg1", "arg2", "arg3"}; !reflect.DeepEqual(args, expected) {
+ t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
+ }
+
+ d.Next() // dir2
+
+ args = d.RemainingArgs()
+ if expected := []string{"arg4", "arg5"}; !reflect.DeepEqual(args, expected) {
+ t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
+ }
+
+ d.Next() // dir3
+
+ args = d.RemainingArgs()
+ if expected := []string{"arg6"}; !reflect.DeepEqual(args, expected) {
+ t.Errorf("RemainingArgs(): Expected %v, got %v", expected, args)
+ }
+
+ d.Next() // {
+ d.Next() // arg7
+ d.Next() // dir4
+
+ args = d.RemainingArgs()
+ if len(args) != 0 {
+ t.Errorf("RemainingArgs(): Expected %v, got %v", []string{}, args)
+ }
+}
+
+func TestDispenser_ArgErr_Err(t *testing.T) {
+ input := `dir1 {
+ }
+ dir2 arg1 arg2`
+ d := newTestDispenser(input)
+
+ d.cursor = 1 // {
+
+ if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "{") {
+ t.Errorf("ArgErr(): Expected an error message with { in it, but got '%v'", err)
+ }
+
+ d.cursor = 5 // arg2
+
+ if err := d.ArgErr(); err == nil || !strings.Contains(err.Error(), "arg2") {
+ t.Errorf("ArgErr(): Expected an error message with 'arg2' in it; got '%v'", err)
+ }
+
+ err := d.Err("foobar")
+ if err == nil {
+ t.Fatalf("Err(): Expected an error, got nil")
+ }
+
+ if !strings.Contains(err.Error(), "Testfile:3") {
+ t.Errorf("Expected error message with filename:line in it; got '%v'", err)
+ }
+
+ if !strings.Contains(err.Error(), "foobar") {
+ t.Errorf("Expected error message with custom message in it ('foobar'); got '%v'", err)
+ }
+}
+
+func newTestDispenser(input string) *Dispenser {
+ tokens, err := allTokens(strings.NewReader(input))
+ if err != nil && err != io.EOF {
+ log.Fatalf("getting all tokens from input: %v", err)
+ }
+ return NewDispenser("Testfile", tokens)
+}
diff --git a/caddyconfig/caddyfile/lexer.go b/caddyconfig/caddyfile/lexer.go
new file mode 100755
index 0000000..efe648d
--- /dev/null
+++ b/caddyconfig/caddyfile/lexer.go
@@ -0,0 +1,150 @@
+// Copyright 2015 Light Code Labs, LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "bufio"
+ "io"
+ "unicode"
+)
+
+type (
+ // lexer is a utility which can get values, token by
+ // token, from a Reader. A token is a word, and tokens
+ // are separated by whitespace. A word can be enclosed
+ // in quotes if it contains whitespace.
+ lexer struct {
+ reader *bufio.Reader
+ token Token
+ line int
+ }
+
+ // Token represents a single parsable unit.
+ Token struct {
+ File string
+ Line int
+ Text string
+ }
+)
+
+// load prepares the lexer to scan an input for tokens.
+// It discards any leading byte order mark.
+func (l *lexer) load(input io.Reader) error {
+ l.reader = bufio.NewReader(input)
+ l.line = 1
+
+ // discard byte order mark, if present
+ firstCh, _, err := l.reader.ReadRune()
+ if err != nil {
+ return err
+ }
+ if firstCh != 0xFEFF {
+ err := l.reader.UnreadRune()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// next loads the next token into the lexer.
+// A token is delimited by whitespace, unless
+// the token starts with a quote character (")
+// in which case the token goes until the closing
+// quote (the enclosing quotes are not included).
+// Inside quoted strings, quotes and newlines may
+// be escaped with a preceding \ character; no other
+// characters may be escaped. The rest of the line
+// is skipped if a "#" character is read in. Returns
+// true if a token was loaded; false otherwise.
+func (l *lexer) next() bool {
+ var val []rune
+ var comment, quoted, escaped bool
+
+ makeToken := func() bool {
+ l.token.Text = string(val)
+ return true
+ }
+
+ for {
+ ch, _, err := l.reader.ReadRune()
+ if err != nil {
+ if len(val) > 0 {
+ return makeToken()
+ }
+ if err == io.EOF {
+ return false
+ }
+ panic(err)
+ }
+
+ if quoted {
+ if !escaped {
+ if ch == '\\' {
+ escaped = true
+ continue
+ } else if ch == '"' {
+ quoted = false
+ return makeToken()
+ }
+ }
+ if ch == '\n' {
+ l.line++
+ }
+ if escaped {
+ // only escape quotes and newlines
+ if ch != '"' && ch != '\n' {
+ val = append(val, '\\')
+ }
+ }
+ val = append(val, ch)
+ escaped = false
+ continue
+ }
+
+ if unicode.IsSpace(ch) {
+ if ch == '\r' {
+ continue
+ }
+ if ch == '\n' {
+ l.line++
+ comment = false
+ }
+ if len(val) > 0 {
+ return makeToken()
+ }
+ continue
+ }
+
+ if ch == '#' {
+ comment = true
+ }
+
+ if comment {
+ continue
+ }
+
+ if len(val) == 0 {
+ l.token = Token{Line: l.line}
+ if ch == '"' {
+ quoted = true
+ continue
+ }
+ }
+
+ val = append(val, ch)
+ }
+}
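
To make the quoting and comment rules concrete, a small sketch; since the lexer type is unexported, this only compiles inside package caddyfile, like the tests in the next file:

	package caddyfile

	import (
		"fmt"
		"strings"
	)

	func exampleLex() {
		l := new(lexer)
		if err := l.load(strings.NewReader(`dir "two words" # trailing comment`)); err != nil {
			panic(err)
		}
		for l.next() {
			fmt.Printf("line %d: %q\n", l.token.Line, l.token.Text)
		}
		// Prints, per the rules above (the comment is skipped entirely):
		//   line 1: "dir"
		//   line 1: "two words"
	}
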
diff --git a/caddyconfig/caddyfile/lexer_test.go b/caddyconfig/caddyfile/lexer_test.go
new file mode 100755
index 0000000..f9a843c
--- /dev/null
+++ b/caddyconfig/caddyfile/lexer_test.go
@@ -0,0 +1,196 @@
+// Copyright 2015 Light Code Labs, LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "log"
+ "strings"
+ "testing"
+)
+
+type lexerTestCase struct {
+ input string
+ expected []Token
+}
+
+func TestLexer(t *testing.T) {
+ testCases := []lexerTestCase{
+ {
+ input: `host:123`,
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ },
+ },
+ {
+ input: `host:123
+
+ directive`,
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 3, Text: "directive"},
+ },
+ },
+ {
+ input: `host:123 {
+ directive
+ }`,
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 2, Text: "directive"},
+ {Line: 3, Text: "}"},
+ },
+ },
+ {
+ input: `host:123 { directive }`,
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 1, Text: "directive"},
+ {Line: 1, Text: "}"},
+ },
+ },
+ {
+ input: `host:123 {
+ #comment
+ directive
+ # comment
+ foobar # another comment
+ }`,
+ expected: []Token{
+ {Line: 1, Text: "host:123"},
+ {Line: 1, Text: "{"},
+ {Line: 3, Text: "directive"},
+ {Line: 5, Text: "foobar"},
+ {Line: 6, Text: "}"},
+ },
+ },
+ {
+ input: `a "quoted value" b
+ foobar`,
+ expected: []Token{
+ {Line: 1, Text: "a"},
+ {Line: 1, Text: "quoted value"},
+ {Line: 1, Text: "b"},
+ {Line: 2, Text: "foobar"},
+ },
+ },
+ {
+ input: `A "quoted \"value\" inside" B`,
+ expected: []Token{
+ {Line: 1, Text: "A"},
+ {Line: 1, Text: `quoted "value" inside`},
+ {Line: 1, Text: "B"},
+ },
+ },
+ {
+ input: "A \"newline \\\ninside\" quotes",
+ expected: []Token{
+ {Line: 1, Text: "A"},
+ {Line: 1, Text: "newline \ninside"},
+ {Line: 2, Text: "quotes"},
+ },
+ },
+ {
+ input: `"don't\escape"`,
+ expected: []Token{
+ {Line: 1, Text: `don't\escape`},
+ },
+ },
+ {
+ input: `"don't\\escape"`,
+ expected: []Token{
+ {Line: 1, Text: `don't\\escape`},
+ },
+ },
+ {
+ input: `A "quoted value with line
+ break inside" {
+ foobar
+ }`,
+ expected: []Token{
+ {Line: 1, Text: "A"},
+ {Line: 1, Text: "quoted value with line\n\t\t\t\t\tbreak inside"},
+ {Line: 2, Text: "{"},
+ {Line: 3, Text: "foobar"},
+ {Line: 4, Text: "}"},
+ },
+ },
+ {
+ input: `"C:\php\php-cgi.exe"`,
+ expected: []Token{
+ {Line: 1, Text: `C:\php\php-cgi.exe`},
+ },
+ },
+ {
+ input: `empty "" string`,
+ expected: []Token{
+ {Line: 1, Text: `empty`},
+ {Line: 1, Text: ``},
+ {Line: 1, Text: `string`},
+ },
+ },
+ {
+ input: "skip those\r\nCR characters",
+ expected: []Token{
+ {Line: 1, Text: "skip"},
+ {Line: 1, Text: "those"},
+ {Line: 2, Text: "CR"},
+ {Line: 2, Text: "characters"},
+ },
+ },
+ {
+ input: "\xEF\xBB\xBF:8080", // test with leading byte order mark
+ expected: []Token{
+ {Line: 1, Text: ":8080"},
+ },
+ },
+ }
+
+ for i, testCase := range testCases {
+ actual := tokenize(testCase.input)
+ lexerCompare(t, i, testCase.expected, actual)
+ }
+}
+
+func tokenize(input string) (tokens []Token) {
+ l := lexer{}
+ if err := l.load(strings.NewReader(input)); err != nil {
+ log.Printf("[ERROR] load failed: %v", err)
+ }
+ for l.next() {
+ tokens = append(tokens, l.token)
+ }
+ return
+}
+
+func lexerCompare(t *testing.T, n int, expected, actual []Token) {
+ if len(expected) != len(actual) {
+ t.Errorf("Test case %d: expected %d token(s) but got %d", n, len(expected), len(actual))
+ }
+
+ for i := 0; i < len(actual) && i < len(expected); i++ {
+ if actual[i].Line != expected[i].Line {
+ t.Errorf("Test case %d token %d ('%s'): expected line %d but was line %d",
+ n, i, expected[i].Text, expected[i].Line, actual[i].Line)
+ break
+ }
+ if actual[i].Text != expected[i].Text {
+ t.Errorf("Test case %d token %d: expected text '%s' but was '%s'",
+ n, i, expected[i].Text, actual[i].Text)
+ break
+ }
+ }
+}
diff --git a/caddyconfig/caddyfile/parse.go b/caddyconfig/caddyfile/parse.go
new file mode 100755
index 0000000..cc7ed25
--- /dev/null
+++ b/caddyconfig/caddyfile/parse.go
@@ -0,0 +1,521 @@
+// Copyright 2015 Light Code Labs, LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// Parse parses the input just enough to group tokens, in
+// order, by server block. No further parsing is performed.
+// Server blocks are returned in the order in which they appear.
+// Directives are not validated here; each server block's
+// tokens are simply grouped into segments, one segment per
+// directive, for later processing by the server type.
+func Parse(filename string, input io.Reader) ([]ServerBlock, error) {
+ tokens, err := allTokens(input)
+ if err != nil {
+ return nil, err
+ }
+ p := parser{Dispenser: NewDispenser(filename, tokens)}
+ return p.parseAll()
+}
+
+// allTokens lexes the entire input, but does not parse it.
+// It returns all the tokens from the input, unstructured
+// and in order.
+func allTokens(input io.Reader) ([]Token, error) {
+ l := new(lexer)
+ err := l.load(input)
+ if err != nil {
+ return nil, err
+ }
+ var tokens []Token
+ for l.next() {
+ tokens = append(tokens, l.token)
+ }
+ return tokens, nil
+}
+
+type parser struct {
+ *Dispenser
+ block ServerBlock // current server block being parsed
+ eof bool // if we encounter a valid EOF in a hard place
+ definedSnippets map[string][]Token
+ nesting int
+}
+
+func (p *parser) parseAll() ([]ServerBlock, error) {
+ var blocks []ServerBlock
+
+ for p.Next() {
+ err := p.parseOne()
+ if err != nil {
+ return blocks, err
+ }
+ if len(p.block.Keys) > 0 {
+ blocks = append(blocks, p.block)
+ }
+ if p.nesting > 0 {
+ return blocks, p.EOFErr()
+ }
+ }
+
+ return blocks, nil
+}
+
+func (p *parser) parseOne() error {
+ p.block = ServerBlock{}
+ return p.begin()
+}
+
+func (p *parser) begin() error {
+ if len(p.tokens) == 0 {
+ return nil
+ }
+
+ err := p.addresses()
+
+ if err != nil {
+ return err
+ }
+
+ if p.eof {
+ // this happens if the Caddyfile consists of only
+ // a line of addresses and nothing else
+ return nil
+ }
+
+ if ok, name := p.isSnippet(); ok {
+ if p.definedSnippets == nil {
+ p.definedSnippets = map[string][]Token{}
+ }
+ if _, found := p.definedSnippets[name]; found {
+ return p.Errf("redeclaration of previously declared snippet %s", name)
+ }
+ // consume all tokens until the matching closing brace
+ tokens, err := p.snippetTokens()
+ if err != nil {
+ return err
+ }
+ p.definedSnippets[name] = tokens
+ // empty block keys so we don't save this block as a real server.
+ p.block.Keys = nil
+ return nil
+ }
+
+ return p.blockContents()
+}
+
+func (p *parser) addresses() error {
+ var expectingAnother bool
+
+ for {
+ tkn := replaceEnvVars(p.Val())
+
+ // special case: import directive replaces tokens during parse-time
+ if tkn == "import" && p.isNewLine() {
+ err := p.doImport()
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Open brace definitely indicates end of addresses
+ if tkn == "{" {
+ if expectingAnother {
+ return p.Errf("Expected another address but had '%s' - check for extra comma", tkn)
+ }
+ break
+ }
+
+ if tkn != "" { // empty token possible if user typed ""
+ // Trailing comma indicates another address will follow, which
+ // may possibly be on the next line
+ if tkn[len(tkn)-1] == ',' {
+ tkn = tkn[:len(tkn)-1]
+ expectingAnother = true
+ } else {
+ expectingAnother = false // but we may still see another one on this line
+ }
+
+ p.block.Keys = append(p.block.Keys, tkn)
+ }
+
+ // Advance token and possibly break out of loop or return error
+ hasNext := p.Next()
+ if expectingAnother && !hasNext {
+ return p.EOFErr()
+ }
+ if !hasNext {
+ p.eof = true
+ break // EOF
+ }
+ if !expectingAnother && p.isNewLine() {
+ break
+ }
+ }
+
+ return nil
+}
+
+func (p *parser) blockContents() error {
+ errOpenCurlyBrace := p.openCurlyBrace()
+ if errOpenCurlyBrace != nil {
+ // single-server configs don't need curly braces
+ p.cursor--
+ }
+
+ err := p.directives()
+ if err != nil {
+ return err
+ }
+
+ // only look for close curly brace if there was an opening
+ if errOpenCurlyBrace == nil {
+ err = p.closeCurlyBrace()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// directives parses through all the lines for directives
+// and it expects the next token to be the first
+// directive. It goes until EOF or closing curly brace
+// which ends the server block.
+func (p *parser) directives() error {
+ for p.Next() {
+ // end of server block
+ if p.Val() == "}" {
+ // p.nesting has already been decremented
+ break
+ }
+
+ // special case: import directive replaces tokens during parse-time
+ if p.Val() == "import" {
+ err := p.doImport()
+ if err != nil {
+ return err
+ }
+ p.cursor-- // cursor is advanced when we continue, so roll back one more
+ continue
+ }
+
+ // normal case: parse a directive as a new segment
+ // (a "segment" is a line which starts with a directive
+ // and which ends at the end of the line or at the end of
+ // the block that is opened at the end of the line)
+ if err := p.directive(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// doImport swaps out the import directive and its argument
+// (a total of 2 tokens) with the tokens in the specified file
+// or globbing pattern. When the function returns, the cursor
+// is on the token before where the import directive was. In
+// other words, call Next() to access the first token that was
+// imported.
+func (p *parser) doImport() error {
+ // syntax checks
+ if !p.NextArg() {
+ return p.ArgErr()
+ }
+ importPattern := replaceEnvVars(p.Val())
+ if importPattern == "" {
+ return p.Err("Import requires a non-empty filepath")
+ }
+ if p.NextArg() {
+ return p.Err("Import takes only one argument (glob pattern or file)")
+ }
+ // splice out the import directive and its argument (2 tokens total)
+ tokensBefore := p.tokens[:p.cursor-1]
+ tokensAfter := p.tokens[p.cursor+1:]
+ var importedTokens []Token
+
+ // first check snippets. That is a simple, non-recursive replacement
+ if p.definedSnippets != nil && p.definedSnippets[importPattern] != nil {
+ importedTokens = p.definedSnippets[importPattern]
+ } else {
+ // make path relative to the file of the _token_ being processed rather
+ // than current working directory (issue #867) and then use glob to get
+ // list of matching filenames
+ absFile, err := filepath.Abs(p.Dispenser.File())
+ if err != nil {
+ return p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err)
+ }
+
+ var matches []string
+ var globPattern string
+ if !filepath.IsAbs(importPattern) {
+ globPattern = filepath.Join(filepath.Dir(absFile), importPattern)
+ } else {
+ globPattern = importPattern
+ }
+ if strings.Count(globPattern, "*") > 1 || strings.Count(globPattern, "?") > 1 ||
+ (strings.Contains(globPattern, "[") && strings.Contains(globPattern, "]")) {
+ // See issue #2096 - a pattern with many glob expansions can hang for too long
+ return p.Errf("Glob pattern may only contain one wildcard (*), but has others: %s", globPattern)
+ }
+ matches, err = filepath.Glob(globPattern)
+
+ if err != nil {
+ return p.Errf("Failed to use import pattern %s: %v", importPattern, err)
+ }
+ if len(matches) == 0 {
+ if strings.ContainsAny(globPattern, "*?[]") {
+ log.Printf("[WARNING] No files matching import glob pattern: %s", importPattern)
+ } else {
+ return p.Errf("File to import not found: %s", importPattern)
+ }
+ }
+
+ // collect all the imported tokens
+
+ for _, importFile := range matches {
+ newTokens, err := p.doSingleImport(importFile)
+ if err != nil {
+ return err
+ }
+ importedTokens = append(importedTokens, newTokens...)
+ }
+ }
+
+ // splice the imported tokens in the place of the import statement
+ // and rewind cursor so Next() will land on first imported token
+ p.tokens = append(tokensBefore, append(importedTokens, tokensAfter...)...)
+ p.cursor--
+
+ return nil
+}
+
+// doSingleImport lexes the individual file at importFile and returns
+// its tokens or an error, if any.
+func (p *parser) doSingleImport(importFile string) ([]Token, error) {
+ file, err := os.Open(importFile)
+ if err != nil {
+ return nil, p.Errf("Could not import %s: %v", importFile, err)
+ }
+ defer file.Close()
+
+ if info, err := file.Stat(); err != nil {
+ return nil, p.Errf("Could not import %s: %v", importFile, err)
+ } else if info.IsDir() {
+ return nil, p.Errf("Could not import %s: is a directory", importFile)
+ }
+
+ importedTokens, err := allTokens(file)
+ if err != nil {
+ return nil, p.Errf("Could not read tokens while importing %s: %v", importFile, err)
+ }
+
+ // Tack the file path onto these tokens so errors show the imported file's name
+ // (we use full, absolute path to avoid bugs: issue #1892)
+ filename, err := filepath.Abs(importFile)
+ if err != nil {
+ return nil, p.Errf("Failed to get absolute path of file: %s: %v", p.Dispenser.filename, err)
+ }
+ for i := 0; i < len(importedTokens); i++ {
+ importedTokens[i].File = filename
+ }
+
+ return importedTokens, nil
+}
+
+// directive collects tokens until the directive's scope
+// closes (either end of line or end of curly brace block).
+// It expects the currently-loaded token to be a directive
+// (or } that ends a server block). The collected tokens
+// are loaded into the current server block for later use
+// by directive setup functions.
+func (p *parser) directive() error {
+ // evaluate any env vars in directive token
+ p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text)
+
+ // a segment is a list of tokens associated with this directive
+ var segment Segment
+
+ // the directive itself is appended as a relevant token
+ segment = append(segment, p.Token())
+
+ for p.Next() {
+ if p.Val() == "{" {
+ p.nesting++
+ } else if p.isNewLine() && p.nesting == 0 {
+ p.cursor-- // read too far
+ break
+ } else if p.Val() == "}" && p.nesting > 0 {
+ p.nesting--
+ } else if p.Val() == "}" && p.nesting == 0 {
+ return p.Err("Unexpected '}' because no matching opening brace")
+ } else if p.Val() == "import" && p.isNewLine() {
+ if err := p.doImport(); err != nil {
+ return err
+ }
+ p.cursor-- // cursor is advanced when we continue, so roll back one more
+ continue
+ }
+ p.tokens[p.cursor].Text = replaceEnvVars(p.tokens[p.cursor].Text)
+ segment = append(segment, p.Token())
+ }
+
+ p.block.Segments = append(p.block.Segments, segment)
+
+ if p.nesting > 0 {
+ return p.EOFErr()
+ }
+
+ return nil
+}
+
+// openCurlyBrace expects the current token to be an
+// opening curly brace. This acts like an assertion
+// because it returns an error if the token is not
+// an opening curly brace. It does NOT advance the token.
+func (p *parser) openCurlyBrace() error {
+ if p.Val() != "{" {
+ return p.SyntaxErr("{")
+ }
+ return nil
+}
+
+// closeCurlyBrace expects the current token to be
+// a closing curly brace. This acts like an assertion
+// because it returns an error if the token is not
+// a closing curly brace. It does NOT advance the token.
+func (p *parser) closeCurlyBrace() error {
+ if p.Val() != "}" {
+ return p.SyntaxErr("}")
+ }
+ return nil
+}
+
+// replaceEnvVars replaces environment variables that appear in the token
+// and understands both the {$UNIX} and {%WINDOWS%} syntaxes.
+func replaceEnvVars(s string) string {
+ s = replaceEnvReferences(s, "{%", "%}")
+ s = replaceEnvReferences(s, "{$", "}")
+ return s
+}
+
+// replaceEnvReferences performs the actual replacement of env variables
+// in s, given the placeholder start and placeholder end strings.
+func replaceEnvReferences(s, refStart, refEnd string) string {
+ index := strings.Index(s, refStart)
+ for index != -1 {
+ endIndex := strings.Index(s[index:], refEnd)
+ if endIndex == -1 {
+ break
+ }
+
+ endIndex += index
+ if endIndex > index+len(refStart) {
+ ref := s[index : endIndex+len(refEnd)]
+ s = strings.Replace(s, ref, os.Getenv(ref[len(refStart):len(ref)-len(refEnd)]), -1)
+ } else {
+ return s
+ }
+ index = strings.Index(s, refStart)
+ }
+ return s
+}
+
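+// isSnippet reports whether the current block is a snippet
+// definition, i.e. a single key of the form (name), and if
+// so, returns the snippet's name.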
+func (p *parser) isSnippet() (bool, string) {
+ keys := p.block.Keys
+ // A snippet block is a single key with parens. Nothing else qualifies.
+ if len(keys) == 1 && strings.HasPrefix(keys[0], "(") && strings.HasSuffix(keys[0], ")") {
+ return true, strings.TrimSuffix(keys[0][1:], ")")
+ }
+ return false, ""
+}
+
+// snippetTokens reads and stores everything in a block for later replay.
+func (p *parser) snippetTokens() ([]Token, error) {
+ // snippet must have curlies.
+ err := p.openCurlyBrace()
+ if err != nil {
+ return nil, err
+ }
+ nesting := 1 // count our own nesting in snippets
+ tokens := []Token{}
+ for p.Next() {
+ if p.Val() == "}" {
+ nesting--
+ if nesting == 0 {
+ break
+ }
+ }
+ if p.Val() == "{" {
+ nesting++
+ }
+ tokens = append(tokens, p.tokens[p.cursor])
+ }
+ // make sure we're matched up
+ if nesting != 0 {
+ return nil, p.SyntaxErr("}")
+ }
+ return tokens, nil
+}
+
+// ServerBlock associates any number of keys from the
+// head of the server block with tokens, which are
+// grouped by segments.
+type ServerBlock struct {
+ Keys []string
+ Segments []Segment
+}
+
+// DispenseDirective returns a dispenser filled with the tokens of
+// all segments in the server block that begin with directive dir.
+func (sb ServerBlock) DispenseDirective(dir string) *Dispenser {
+ var tokens []Token
+ for _, seg := range sb.Segments {
+ if len(seg) > 0 && seg[0].Text == dir {
+ tokens = append(tokens, seg...)
+ }
+ }
+ return NewDispenser("", tokens)
+}
+
+// Segment is a list of tokens which begins with a directive
+// and ends at the end of the directive (either at the end of
+// the line, or at the end of a block it opens).
+type Segment []Token
+
+// Directive returns the directive name for the segment.
+// The directive name is the text of the first token.
+func (s Segment) Directive() string {
+ if len(s) > 0 {
+ return s[0].Text
+ }
+ return ""
+}
+
+// NewDispenser returns a dispenser for this
+// segment's tokens.
+func (s Segment) NewDispenser() *Dispenser {
+ return NewDispenser("", s)
+}
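
An end-to-end sketch of this file's exported surface; the Caddyfile content (a snippet plus a site block that imports it) is invented, but the behavior follows the code above:

	package main

	import (
		"fmt"
		"log"
		"strings"

		"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
	)

	func main() {
		input := "(logging) {\n\tlog stdout\n}\n\nexample.com {\n\timport logging\n\troot /var/www\n}"

		blocks, err := caddyfile.Parse("Caddyfile", strings.NewReader(input))
		if err != nil {
			log.Fatal(err)
		}
		// The snippet body is spliced in at parse time, so only the
		// example.com block remains, with "log" and "root" segments.
		for _, sb := range blocks {
			fmt.Println("keys:", sb.Keys)
			for _, seg := range sb.Segments {
				fmt.Printf("  %s (%d tokens)\n", seg.Directive(), len(seg))
			}
		}
	}
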
diff --git a/caddyconfig/caddyfile/parse_test.go b/caddyconfig/caddyfile/parse_test.go
new file mode 100755
index 0000000..19959de
--- /dev/null
+++ b/caddyconfig/caddyfile/parse_test.go
@@ -0,0 +1,681 @@
+// Copyright 2015 Light Code Labs, LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyfile
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// TODO: re-enable all tests
+
+func TestAllTokens(t *testing.T) {
+ input := strings.NewReader("a b c\nd e")
+ expected := []string{"a", "b", "c", "d", "e"}
+ tokens, err := allTokens(input)
+
+ if err != nil {
+ t.Fatalf("Expected no error, got %v", err)
+ }
+ if len(tokens) != len(expected) {
+ t.Fatalf("Expected %d tokens, got %d", len(expected), len(tokens))
+ }
+
+ for i, val := range expected {
+ if tokens[i].Text != val {
+ t.Errorf("Token %d should be '%s' but was '%s'", i, val, tokens[i].Text)
+ }
+ }
+}
+
+func TestParseOneAndImport(t *testing.T) {
+ testParseOne := func(input string) (ServerBlock, error) {
+ p := testParser(input)
+ p.Next() // parseOne doesn't call Next() to start, so we must
+ err := p.parseOne()
+ return p.block, err
+ }
+
+ for i, test := range []struct {
+ input string
+ shouldErr bool
+ keys []string
+ numTokens []int // number of tokens to expect in each segment
+ }{
+ {`localhost`, false, []string{
+ "localhost",
+ }, []int{}},
+
+ {`localhost
+ dir1`, false, []string{
+ "localhost",
+ }, []int{1}},
+
+ {`localhost:1234
+ dir1 foo bar`, false, []string{
+ "localhost:1234",
+ }, []int{3},
+ },
+
+ {`localhost {
+ dir1
+ }`, false, []string{
+ "localhost",
+ }, []int{1}},
+
+ {`localhost:1234 {
+ dir1 foo bar
+ dir2
+ }`, false, []string{
+ "localhost:1234",
+ }, []int{3, 1}},
+
+ {`http://localhost https://localhost
+ dir1 foo bar`, false, []string{
+ "http://localhost",
+ "https://localhost",
+ }, []int{3}},
+
+ {`http://localhost https://localhost {
+ dir1 foo bar
+ }`, false, []string{
+ "http://localhost",
+ "https://localhost",
+ }, []int{3}},
+
+ {`http://localhost, https://localhost {
+ dir1 foo bar
+ }`, false, []string{
+ "http://localhost",
+ "https://localhost",
+ }, []int{3}},
+
+ {`http://localhost, {
+ }`, true, []string{
+ "http://localhost",
+ }, []int{}},
+
+ {`host1:80, http://host2.com
+ dir1 foo bar
+ dir2 baz`, false, []string{
+ "host1:80",
+ "http://host2.com",
+ }, []int{3, 2}},
+
+ {`http://host1.com,
+ http://host2.com,
+ https://host3.com`, false, []string{
+ "http://host1.com",
+ "http://host2.com",
+ "https://host3.com",
+ }, []int{}},
+
+ {`http://host1.com:1234, https://host2.com
+ dir1 foo {
+ bar baz
+ }
+ dir2`, false, []string{
+ "http://host1.com:1234",
+ "https://host2.com",
+ }, []int{6, 1}},
+
+ {`127.0.0.1
+ dir1 {
+ bar baz
+ }
+ dir2 {
+ foo bar
+ }`, false, []string{
+ "127.0.0.1",
+ }, []int{5, 5}},
+
+ {`localhost
+ dir1 {
+ foo`, true, []string{
+ "localhost",
+ }, []int{3}},
+
+ {`localhost
+ dir1 {
+ }`, false, []string{
+ "localhost",
+ }, []int{3}},
+
+ {`localhost
+ dir1 {
+ } }`, true, []string{
+ "localhost",
+ }, []int{}},
+
+ {`localhost
+ dir1 {
+ nested {
+ foo
+ }
+ }
+ dir2 foo bar`, false, []string{
+ "localhost",
+ }, []int{7, 3}},
+
+ {``, false, []string{}, []int{}},
+
+ {`localhost
+ dir1 arg1
+ import testdata/import_test1.txt`, false, []string{
+ "localhost",
+ }, []int{2, 3, 1}},
+
+ {`import testdata/import_test2.txt`, false, []string{
+ "host1",
+ }, []int{1, 2}},
+
+ {`import testdata/import_test1.txt testdata/import_test2.txt`, true, []string{}, []int{}},
+
+ {`import testdata/not_found.txt`, true, []string{}, []int{}},
+
+ {`""`, false, []string{}, []int{}},
+
+ {``, false, []string{}, []int{}},
+
+ // test cases found by fuzzing!
+ {`import }{$"`, true, []string{}, []int{}},
+ {`import /*/*.txt`, true, []string{}, []int{}},
+ {`import /???/?*?o`, true, []string{}, []int{}},
+ {`import /??`, true, []string{}, []int{}},
+ {`import /[a-z]`, true, []string{}, []int{}},
+ {`import {$}`, true, []string{}, []int{}},
+ {`import {%}`, true, []string{}, []int{}},
+ {`import {$$}`, true, []string{}, []int{}},
+ {`import {%%}`, true, []string{}, []int{}},
+ } {
+ result, err := testParseOne(test.input)
+
+ if test.shouldErr && err == nil {
+ t.Errorf("Test %d: Expected an error, but didn't get one", i)
+ }
+ if !test.shouldErr && err != nil {
+ t.Errorf("Test %d: Expected no error, but got: %v", i, err)
+ }
+
+ if len(result.Keys) != len(test.keys) {
+ t.Errorf("Test %d: Expected %d keys, got %d",
+ i, len(test.keys), len(result.Keys))
+ continue
+ }
+ for j, addr := range result.Keys {
+ if addr != test.keys[j] {
+ t.Errorf("Test %d, key %d: Expected '%s', but was '%s'",
+ i, j, test.keys[j], addr)
+ }
+ }
+
+ if len(result.Segments) != len(test.numTokens) {
+ t.Errorf("Test %d: Expected %d segments, had %d",
+ i, len(test.numTokens), len(result.Segments))
+ continue
+ }
+
+ for j, seg := range result.Segments {
+ if len(seg) != test.numTokens[j] {
+ t.Errorf("Test %d, segment %d: Expected %d tokens, counted %d",
+ i, j, test.numTokens[j], len(seg))
+ continue
+ }
+ }
+ }
+}
+
+func TestRecursiveImport(t *testing.T) {
+ testParseOne := func(input string) (ServerBlock, error) {
+ p := testParser(input)
+ p.Next() // parseOne doesn't call Next() to start, so we must
+ err := p.parseOne()
+ return p.block, err
+ }
+
+ isExpected := func(got ServerBlock) bool {
+ if len(got.Keys) != 1 || got.Keys[0] != "localhost" {
+ t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
+ return false
+ }
+ if len(got.Segments) != 2 {
+ t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments))
+ return false
+ }
+ if len(got.Segments[0]) != 1 || len(got.Segments[1]) != 2 {
+ t.Errorf("got unexpect tokens: %v", got.Segments)
+ return false
+ }
+ return true
+ }
+
+ recursiveFile1, err := filepath.Abs("testdata/recursive_import_test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ recursiveFile2, err := filepath.Abs("testdata/recursive_import_test2")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // test relative recursive import
+ err = ioutil.WriteFile(recursiveFile1, []byte(
+ `localhost
+ dir1
+ import recursive_import_test2`), 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(recursiveFile1)
+
+ err = ioutil.WriteFile(recursiveFile2, []byte("dir2 1"), 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(recursiveFile2)
+
+ // import absolute path
+ result, err := testParseOne("import " + recursiveFile1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("absolute+relative import failed")
+ }
+
+ // import relative path
+ result, err = testParseOne("import testdata/recursive_import_test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("relative+relative import failed")
+ }
+
+ // test absolute recursive import
+ err = ioutil.WriteFile(recursiveFile1, []byte(
+ `localhost
+ dir1
+ import `+recursiveFile2), 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // import absolute path
+ result, err = testParseOne("import " + recursiveFile1)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("absolute+absolute import failed")
+ }
+
+ // import relative path
+ result, err = testParseOne("import testdata/recursive_import_test1")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("relative+absolute import failed")
+ }
+}
+
+func TestDirectiveImport(t *testing.T) {
+ testParseOne := func(input string) (ServerBlock, error) {
+ p := testParser(input)
+ p.Next() // parseOne doesn't call Next() to start, so we must
+ err := p.parseOne()
+ return p.block, err
+ }
+
+ isExpected := func(got ServerBlock) bool {
+ if len(got.Keys) != 1 || got.Keys[0] != "localhost" {
+ t.Errorf("got keys unexpected: expect localhost, got %v", got.Keys)
+ return false
+ }
+ if len(got.Segments) != 2 {
+ t.Errorf("got wrong number of segments: expect 2, got %d", len(got.Segments))
+ return false
+ }
+ if len(got.Segments[0]) != 1 || len(got.Segments[1]) != 8 {
+ t.Errorf("got unexpect tokens: %v", got.Segments)
+ return false
+ }
+ return true
+ }
+
+ directiveFile, err := filepath.Abs("testdata/directive_import_test")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ err = ioutil.WriteFile(directiveFile, []byte(`prop1 1
+ prop2 2`), 0644)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(directiveFile)
+
+ // import from existing file
+ result, err := testParseOne(`localhost
+ dir1
+ proxy {
+ import testdata/directive_import_test
+ transparent
+ }`)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !isExpected(result) {
+ t.Error("directive import failed")
+ }
+
+ // import from nonexistent file
+ _, err = testParseOne(`localhost
+ dir1
+ proxy {
+ import testdata/nonexistent_file
+ transparent
+ }`)
+ if err == nil {
+ t.Fatal("expected error when importing a nonexistent file")
+ }
+}
+
+func TestParseAll(t *testing.T) {
+ for i, test := range []struct {
+ input string
+ shouldErr bool
+ keys [][]string // keys per server block, in order
+ }{
+ {`localhost`, false, [][]string{
+ {"localhost"},
+ }},
+
+ {`localhost:1234`, false, [][]string{
+ {"localhost:1234"},
+ }},
+
+ {`localhost:1234 {
+ }
+ localhost:2015 {
+ }`, false, [][]string{
+ {"localhost:1234"},
+ {"localhost:2015"},
+ }},
+
+ {`localhost:1234, http://host2`, false, [][]string{
+ {"localhost:1234", "http://host2"},
+ }},
+
+ {`localhost:1234, http://host2,`, true, [][]string{}},
+
+ {`http://host1.com, http://host2.com {
+ }
+ https://host3.com, https://host4.com {
+ }`, false, [][]string{
+ {"http://host1.com", "http://host2.com"},
+ {"https://host3.com", "https://host4.com"},
+ }},
+
+ {`import testdata/import_glob*.txt`, false, [][]string{
+ {"glob0.host0"},
+ {"glob0.host1"},
+ {"glob1.host0"},
+ {"glob2.host0"},
+ }},
+
+ {`import notfound/*`, false, [][]string{}}, // glob needn't error with no matches
+ {`import notfound/file.conf`, true, [][]string{}}, // but a specific file should
+ } {
+ p := testParser(test.input)
+ blocks, err := p.parseAll()
+
+ if test.shouldErr && err == nil {
+ t.Errorf("Test %d: Expected an error, but didn't get one", i)
+ }
+ if !test.shouldErr && err != nil {
+ t.Errorf("Test %d: Expected no error, but got: %v", i, err)
+ }
+
+ if len(blocks) != len(test.keys) {
+ t.Errorf("Test %d: Expected %d server blocks, got %d",
+ i, len(test.keys), len(blocks))
+ continue
+ }
+ for j, block := range blocks {
+ if len(block.Keys) != len(test.keys[j]) {
+ t.Errorf("Test %d: Expected %d keys in block %d, got %d",
+ i, len(test.keys[j]), j, len(block.Keys))
+ continue
+ }
+ for k, addr := range block.Keys {
+ if addr != test.keys[j][k] {
+ t.Errorf("Test %d, block %d, key %d: Expected '%s', but got '%s'",
+ i, j, k, test.keys[j][k], addr)
+ }
+ }
+ }
+ }
+}
+
+func TestEnvironmentReplacement(t *testing.T) {
+ os.Setenv("PORT", "8080")
+ os.Setenv("ADDRESS", "servername.com")
+ os.Setenv("FOOBAR", "foobar")
+ os.Setenv("PARTIAL_DIR", "r1")
+
+ // basic test; unix-style env vars
+ p := testParser(`{$ADDRESS}`)
+ blocks, _ := p.parseAll()
+ if actual, expected := blocks[0].Keys[0], "servername.com"; expected != actual {
+ t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
+ }
+
+ // unix-style env var as part of a token
+ p = testParser(`di{$PARTIAL_DIR}`)
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Keys[0], "dir1"; expected != actual {
+ t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
+ }
+
+ // multiple vars per token
+ p = testParser(`{$ADDRESS}:{$PORT}`)
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
+ t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
+ }
+
+ // windows-style var and unix style in same token
+ p = testParser(`{%ADDRESS%}:{$PORT}`)
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
+ t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
+ }
+
+ // reverse order
+ p = testParser(`{$ADDRESS}:{%PORT%}`)
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Keys[0], "servername.com:8080"; expected != actual {
+ t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
+ }
+
+ // env var in server block body as argument
+ p = testParser(":{%PORT%}\ndir1 {$FOOBAR}")
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Keys[0], ":8080"; expected != actual {
+ t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
+ }
+ if actual, expected := blocks[0].Segments[0][1].Text, "foobar"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+
+ // combined windows env vars in argument
+ p = testParser(":{%PORT%}\ndir1 {%ADDRESS%}/{%FOOBAR%}")
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Segments[0][1].Text, "servername.com/foobar"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+
+ // malformed env var (windows)
+ p = testParser(":1234\ndir1 {%ADDRESS}")
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Segments[0][1].Text, "{%ADDRESS}"; expected != actual {
+ t.Errorf("Expected host to be '%s' but was '%s'", expected, actual)
+ }
+
+ // malformed (non-existent) env var (unix)
+ p = testParser(`:{$PORT$}`)
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Keys[0], ":"; expected != actual {
+ t.Errorf("Expected key to be '%s' but was '%s'", expected, actual)
+ }
+
+ // in quoted field
+ p = testParser(":1234\ndir1 \"Test {$FOOBAR} test\"")
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Segments[0][1].Text, "Test foobar test"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+
+ // after end token
+ p = testParser(":1234\nanswer \"{{ .Name }} {$FOOBAR}\"")
+ blocks, _ = p.parseAll()
+ if actual, expected := blocks[0].Segments[0][1].Text, "{{ .Name }} foobar"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+}
+
+func TestSnippets(t *testing.T) {
+ p := testParser(`
+ (common) {
+ gzip foo
+ errors stderr
+ }
+ http://example.com {
+ import common
+ }
+ `)
+ blocks, err := p.parseAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, b := range blocks {
+ t.Log(b.Keys)
+ t.Log(b.Segments)
+ }
+ if len(blocks) != 1 {
+ t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
+ }
+ if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
+ t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
+ }
+ if len(blocks[0].Segments) != 2 {
+ t.Fatalf("Server block should have tokens from import, got: %+v", blocks[0])
+ }
+ if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+ if actual, expected := blocks[0].Segments[1][1].Text, "stderr"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+}
+
+func writeStringToTempFileOrDie(t *testing.T, str string) (pathToFile string) {
+ file, err := ioutil.TempFile("", t.Name())
+ if err != nil {
+ panic(err) // get a stack trace so we know where this was called from.
+ }
+ if _, err := file.WriteString(str); err != nil {
+ panic(err)
+ }
+ if err := file.Close(); err != nil {
+ panic(err)
+ }
+ return file.Name()
+}
+
+func TestImportedFilesIgnoreNonDirectiveImportTokens(t *testing.T) {
+ fileName := writeStringToTempFileOrDie(t, `
+ http://example.com {
+ # This isn't an import directive, it's just an arg with value 'import'
+ basicauth / import password
+ }
+ `)
+ // Parse the root file that imports the other one.
+ p := testParser(`import ` + fileName)
+ blocks, err := p.parseAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, b := range blocks {
+ t.Log(b.Keys)
+ t.Log(b.Segments)
+ }
+ auth := blocks[0].Segments[0]
+ line := auth[0].Text + " " + auth[1].Text + " " + auth[2].Text + " " + auth[3].Text
+ if line != "basicauth / import password" {
+ // Previously, it would be changed to:
+ // basicauth / import /path/to/test/dir/password
+ // referencing a file that (probably) doesn't exist and changing the
+ // password!
+ t.Errorf("Expected basicauth tokens to be 'basicauth / import password' but got %#q", line)
+ }
+}
+
+func TestSnippetAcrossMultipleFiles(t *testing.T) {
+ // Make the derived Caddyfile that expects (common) to be defined.
+ fileName := writeStringToTempFileOrDie(t, `
+ http://example.com {
+ import common
+ }
+ `)
+
+ // Parse the root file that defines (common) and then imports the other one.
+ p := testParser(`
+ (common) {
+ gzip foo
+ }
+ import ` + fileName + `
+ `)
+
+ blocks, err := p.parseAll()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, b := range blocks {
+ t.Log(b.Keys)
+ t.Log(b.Segments)
+ }
+ if len(blocks) != 1 {
+ t.Fatalf("Expect exactly one server block. Got %d.", len(blocks))
+ }
+ if actual, expected := blocks[0].Keys[0], "http://example.com"; expected != actual {
+ t.Errorf("Expected server name to be '%s' but was '%s'", expected, actual)
+ }
+ if len(blocks[0].Segments) != 1 {
+ t.Fatalf("Server block should have tokens from import")
+ }
+ if actual, expected := blocks[0].Segments[0][0].Text, "gzip"; expected != actual {
+ t.Errorf("Expected argument to be '%s' but was '%s'", expected, actual)
+ }
+}
+
+func testParser(input string) parser {
+ return parser{Dispenser: newTestDispenser(input)}
+}
diff --git a/caddyconfig/caddyfile/testdata/import_glob0.txt b/caddyconfig/caddyfile/testdata/import_glob0.txt
new file mode 100755
index 0000000..e610b5e
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_glob0.txt
@@ -0,0 +1,6 @@
+glob0.host0 {
+ dir2 arg1
+}
+
+glob0.host1 {
+}
diff --git a/caddyconfig/caddyfile/testdata/import_glob1.txt b/caddyconfig/caddyfile/testdata/import_glob1.txt
new file mode 100755
index 0000000..111eb04
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_glob1.txt
@@ -0,0 +1,4 @@
+glob1.host0 {
+ dir1
+ dir2 arg1
+}
diff --git a/caddyconfig/caddyfile/testdata/import_glob2.txt b/caddyconfig/caddyfile/testdata/import_glob2.txt
new file mode 100755
index 0000000..c09f784
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_glob2.txt
@@ -0,0 +1,3 @@
+glob2.host0 {
+ dir2 arg1
+}
diff --git a/caddyconfig/caddyfile/testdata/import_test1.txt b/caddyconfig/caddyfile/testdata/import_test1.txt
new file mode 100755
index 0000000..dac7b29
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_test1.txt
@@ -0,0 +1,2 @@
+dir2 arg1 arg2
+dir3 \ No newline at end of file
diff --git a/caddyconfig/caddyfile/testdata/import_test2.txt b/caddyconfig/caddyfile/testdata/import_test2.txt
new file mode 100755
index 0000000..140c879
--- /dev/null
+++ b/caddyconfig/caddyfile/testdata/import_test2.txt
@@ -0,0 +1,4 @@
+host1 {
+ dir1
+ dir2 arg1
+} \ No newline at end of file
diff --git a/caddyconfig/configadapters.go b/caddyconfig/configadapters.go
new file mode 100644
index 0000000..6e5d530
--- /dev/null
+++ b/caddyconfig/configadapters.go
@@ -0,0 +1,113 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package caddyconfig
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Adapter is a type which can adapt a configuration to Caddy JSON.
+// It returns the results and any warnings, or an error.
+type Adapter interface {
+ Adapt(body []byte, options map[string]string) ([]byte, []Warning, error)
+}
+
+// Warning represents a warning or notice related to conversion.
+type Warning struct {
+ File string
+ Line int
+ Directive string
+ Message string
+}
+
+// JSON encodes val as JSON, returning it as a json.RawMessage. Any
+// marshaling errors (which are highly unlikely with correct code)
+// are converted to warnings. This is convenient when filling config
+// structs that require a json.RawMessage, without having to worry
+// about errors.
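+//
+// For example (an illustrative sketch):
+//
+//     var warnings []Warning
+//     raw := JSON(map[string]string{"handler": "static_response"}, &warnings)
+//     // raw == json.RawMessage(`{"handler":"static_response"}`)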
+func JSON(val interface{}, warnings *[]Warning) json.RawMessage {
+ b, err := json.Marshal(val)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+ return b
+}
+
+// JSONModuleObject is like JSON, except it marshals val into a JSON object
+// and then adds a key to that object named fieldName with the value fieldVal.
+// This is useful for JSON-encoding module values where the module name has to
+// be described within the object by a certain key; for example,
+// "responder": "file_server" for a file server HTTP responder. The val must
+// encode into a map[string]interface{} (i.e. it must be a struct or map),
+// and any errors are converted into warnings, so this can be conveniently
+// used when filling a struct. For correct code, there should be no errors.
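+//
+// For example (a sketch; fs is a hypothetical value
+// that marshals to {"root":"/srv"}):
+//
+//     raw := JSONModuleObject(fs, "handler", "file_server", &warnings)
+//     // raw == json.RawMessage(`{"handler":"file_server","root":"/srv"}`)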
+func JSONModuleObject(val interface{}, fieldName, fieldVal string, warnings *[]Warning) json.RawMessage {
+ // encode to a JSON object first
+ enc, err := json.Marshal(val)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+
+ // then decode the object
+ var tmp map[string]interface{}
+ err = json.Unmarshal(enc, &tmp)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+
+ // so we can easily add the module's field with its appointed value
+ tmp[fieldName] = fieldVal
+
+ // then re-marshal as JSON
+ result, err := json.Marshal(tmp)
+ if err != nil {
+ if warnings != nil {
+ *warnings = append(*warnings, Warning{Message: err.Error()})
+ }
+ return nil
+ }
+
+ return result
+}
+
+// JSONIndent is used to JSON-marshal the final resulting Caddy
+// configuration in a consistent, human-readable way.
+func JSONIndent(val interface{}) ([]byte, error) {
+ return json.MarshalIndent(val, "", "\t")
+}
+
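+// RegisterAdapter registers a config adapter with the given name. It
+// returns an error if an adapter with that name is already registered.
+// Adapters are typically registered from an init function; for example
+// (an illustrative sketch, where myAdapter is a hypothetical Adapter):
+//
+//     func init() {
+//         if err := caddyconfig.RegisterAdapter("myformat", myAdapter{}); err != nil {
+//             panic(err)
+//         }
+//     }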
+func RegisterAdapter(name string, adapter Adapter) error {
+ if _, ok := configAdapters[name]; ok {
+ return fmt.Errorf("%s: already registered", name)
+ }
+ configAdapters[name] = adapter
+ return nil
+}
+
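+// GetAdapter returns the adapter registered with name,
+// or nil if no adapter by that name is registered.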
+func GetAdapter(name string) Adapter {
+ return configAdapters[name]
+}
+
+var configAdapters = make(map[string]Adapter)
diff --git a/caddyconfig/httpcaddyfile/addresses.go b/caddyconfig/httpcaddyfile/addresses.go
new file mode 100644
index 0000000..2adb818
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/addresses.go
@@ -0,0 +1,332 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+
+ "github.com/caddyserver/caddy/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/mholt/certmagic"
+)
+
+// mapAddressToServerBlocks returns a map of listener address to list of server
+// blocks that will be served on that address. To do this, each server block is
+// expanded so that each one is considered individually, although keys of a
+// server block that share the same address stay grouped together so the config
+// isn't repeated unnecessarily. For example, this Caddyfile:
+//
+// example.com {
+// bind 127.0.0.1
+// }
+// www.example.com, example.net/path, localhost:9999 {
+// bind 127.0.0.1 1.2.3.4
+// }
+//
+// has two server blocks to start with. But expressed in this Caddyfile are
+// actually 4 listener addresses: 127.0.0.1:443, 1.2.3.4:443, 127.0.0.1:9999,
+// and 1.2.3.4:9999. This is because the bind directive is applied to each
+// key of its server block (specifying the host part), and each key may have
+// a different port. And we definitely need to be sure that a site which is
+// bound to be served on a specific interface is not served on others just
+// because that is more convenient: it would be a potential security risk
+// if the difference between interfaces means private vs. public.
+//
+// So what this function does for the example above is iterate each server
+// block, and for each server block, iterate its keys. For the first, it
+// finds one key (example.com) and determines its listener address
+// (127.0.0.1:443 - because of 'bind' and automatic HTTPS). It then adds
+// the listener address to the map value returned by this function, with
+// the first server block as one of its associations.
+//
+// It then iterates each key on the second server block and associates them
+// with one or more listener addresses. Indeed, each key in this block has
+// two listener addresses because of the 'bind' directive. Once we know
+// which addresses serve which keys, we can create a new server block for
+// each address containing the contents of the server block and only those
+// specific keys of the server block which use that address.
+//
+// It is possible and even likely that some keys in the returned map have
+// the exact same list of server blocks (i.e. they are identical). This
+// happens when multiple hosts are declared with a 'bind' directive and
+// the resulting listener addresses are not shared by any other server
+// block (or the other server blocks are exactly identical in their token
+// contents). This happens with our example above because 1.2.3.4:443
+// and 1.2.3.4:9999 are used exclusively with the second server block. This
+// repetition may be undesirable, so call consolidateAddrMappings() to map
+// multiple addresses to the same lists of server blocks (a many:many mapping).
+// (Doing this is essentially a map-reduce technique.)
+func (st *ServerType) mapAddressToServerBlocks(originalServerBlocks []serverBlock) (map[string][]serverBlock, error) {
+ sbmap := make(map[string][]serverBlock)
+
+ for i, sblock := range originalServerBlocks {
+ // within a server block, we need to map all the listener addresses
+ // implied by the server block to the keys of the server block which
+ // will be served by them; this has the effect of treating each
+ // key of a server block as its own, but without having to repeat its
+ // contents in cases where multiple keys really can be served together
+ addrToKeys := make(map[string][]string)
+ for j, key := range sblock.block.Keys {
+ // a key can have multiple listener addresses if there are multiple
+ // arguments to the 'bind' directive (although they will all have
+ // the same port, since the port is defined by the key or is implicit
+ // through automatic HTTPS)
+ addrs, err := st.listenerAddrsForServerBlockKey(sblock, key)
+ if err != nil {
+ return nil, fmt.Errorf("server block %d, key %d (%s): determining listener address: %v", i, j, key, err)
+ }
+
+ // associate this key with each listener address it is served on
+ for _, addr := range addrs {
+ addrToKeys[addr] = append(addrToKeys[addr], key)
+ }
+ }
+
+ // now that we know which addresses serve which keys of this
+ // server block, we iterate that mapping and create a list of
+ // new server blocks for each address where the keys of the
+ // server block are only the ones which use the address; but
+ // the contents (tokens) are of course the same
+ for addr, keys := range addrToKeys {
+ sbmap[addr] = append(sbmap[addr], serverBlock{
+ block: caddyfile.ServerBlock{
+ Keys: keys,
+ Segments: sblock.block.Segments,
+ },
+ pile: sblock.pile,
+ })
+ }
+ }
+
+ return sbmap, nil
+}
+
+// consolidateAddrMappings eliminates repetition of identical server blocks in a mapping of
+// single listener addresses to lists of server blocks. Since multiple addresses may serve
+// identical sites (server block contents), this function turns a 1:many mapping into a
+// many:many mapping. Server block contents (tokens) must be exactly identical so that
+// reflect.DeepEqual returns true in order for the addresses to be combined. Identical
+// entries are deleted from the addrToServerBlocks map. Essentially, each pairing (each
+// association from multiple addresses to multiple server blocks; i.e. each element of
+// the returned slice) becomes a server definition in the output JSON.
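+//
+// For example (a sketch), if 127.0.0.1:443 and 1.2.3.4:443 both map to
+// deeply-equal lists of server blocks, the two entries collapse into a
+// single association whose addresses list contains both addresses.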
+func (st *ServerType) consolidateAddrMappings(addrToServerBlocks map[string][]serverBlock) []sbAddrAssociation {
+ var sbaddrs []sbAddrAssociation
+ for addr, sblocks := range addrToServerBlocks {
+ // we start with knowing that at least this address
+ // maps to these server blocks
+ a := sbAddrAssociation{
+ addresses: []string{addr},
+ serverBlocks: sblocks,
+ }
+
+ // now find other addresses that map to identical
+ // server blocks and add them to our list of
+ // addresses, while removing them from the map
+ for otherAddr, otherSblocks := range addrToServerBlocks {
+ if addr == otherAddr {
+ continue
+ }
+ if reflect.DeepEqual(sblocks, otherSblocks) {
+ a.addresses = append(a.addresses, otherAddr)
+ delete(addrToServerBlocks, otherAddr)
+ }
+ }
+
+ sbaddrs = append(sbaddrs, a)
+ }
+ return sbaddrs
+}
+
+func (st *ServerType) listenerAddrsForServerBlockKey(sblock serverBlock, key string) ([]string, error) {
+ addr, err := ParseAddress(key)
+ if err != nil {
+ return nil, fmt.Errorf("parsing key: %v", err)
+ }
+ addr = addr.Normalize()
+
+ lnPort := defaultPort
+ if addr.Port != "" {
+ // port explicitly defined
+ lnPort = addr.Port
+ } else if certmagic.HostQualifies(addr.Host) {
+ // automatic HTTPS
+ lnPort = strconv.Itoa(certmagic.HTTPSPort)
+ }
+
+ // the bind directive specifies hosts, but is optional
+ var lnHosts []string
+ for _, cfgVal := range sblock.pile["bind"] {
+ lnHosts = append(lnHosts, cfgVal.Value.([]string)...)
+ }
+ if len(lnHosts) == 0 {
+ lnHosts = []string{""}
+ }
+
+ // use a map to prevent duplication
+ listeners := make(map[string]struct{})
+ for _, host := range lnHosts {
+ listeners[net.JoinHostPort(host, lnPort)] = struct{}{}
+ }
+
+ // now turn map into list
+ var listenersList []string
+ for lnStr := range listeners {
+ listenersList = append(listenersList, lnStr)
+ }
+ // sort.Strings(listenersList) // TODO: is sorting necessary?
+
+ return listenersList, nil
+}
+
+// Address represents a site address. It contains
+// the original input value, and the component
+// parts of an address. The component parts may be
+// updated to the correct values as setup proceeds,
+// but the original value should never be changed.
+//
+// The Host field must be in a normalized form.
+type Address struct {
+ Original, Scheme, Host, Port, Path string
+}
+
+// ParseAddress parses an address string into a structured format with separate
+// scheme, host, port, and path portions, as well as the original input string.
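+//
+// For example (a sketch of typical results):
+//
+//     addr, err := ParseAddress("https://example.com/foo")
+//     // err == nil; addr.Scheme == "https", addr.Host == "example.com",
+//     // addr.Port == "443", addr.Path == "/foo"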
+func ParseAddress(str string) (Address, error) {
+ httpPort, httpsPort := strconv.Itoa(certmagic.HTTPPort), strconv.Itoa(certmagic.HTTPSPort)
+
+ input := str
+
+ // Split input into components (prepend with // to force host portion by default)
+ if !strings.Contains(str, "//") && !strings.HasPrefix(str, "/") {
+ str = "//" + str
+ }
+
+ u, err := url.Parse(str)
+ if err != nil {
+ return Address{}, err
+ }
+
+ // separate host and port
+ host, port, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ host, port, err = net.SplitHostPort(u.Host + ":")
+ if err != nil {
+ host = u.Host
+ }
+ }
+
+ // see if we can set port based off scheme
+ if port == "" {
+ if u.Scheme == "http" {
+ port = httpPort
+ } else if u.Scheme == "https" {
+ port = httpsPort
+ }
+ }
+
+ // error if scheme and port combination violate convention
+ if (u.Scheme == "http" && port == httpsPort) || (u.Scheme == "https" && port == httpPort) {
+ return Address{}, fmt.Errorf("[%s] scheme and port violate convention", input)
+ }
+
+ return Address{Original: input, Scheme: u.Scheme, Host: host, Port: port, Path: u.Path}, err
+}
+
+// TODO: which of the methods on Address are even used?
+
+// String returns a human-readable form of a. It will
+// be a cleaned-up and filled-out URL string.
+func (a Address) String() string {
+ if a.Host == "" && a.Port == "" {
+ return ""
+ }
+ scheme := a.Scheme
+ if scheme == "" {
+ if a.Port == strconv.Itoa(certmagic.HTTPSPort) {
+ scheme = "https"
+ } else {
+ scheme = "http"
+ }
+ }
+ s := scheme
+ if s != "" {
+ s += "://"
+ }
+ if a.Port != "" &&
+ ((scheme == "https" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPSPort)) ||
+ (scheme == "http" && a.Port != strconv.Itoa(caddyhttp.DefaultHTTPPort))) {
+ s += net.JoinHostPort(a.Host, a.Port)
+ } else {
+ s += a.Host
+ }
+ if a.Path != "" {
+ s += a.Path
+ }
+ return s
+}
+
+// Normalize returns a normalized version of a.
+func (a Address) Normalize() Address {
+ path := a.Path
+ if !caseSensitivePath {
+ path = strings.ToLower(path)
+ }
+
+ // ensure host is normalized if it's an IP address
+ host := a.Host
+ if ip := net.ParseIP(host); ip != nil {
+ host = ip.String()
+ }
+
+ return Address{
+ Original: a.Original,
+ Scheme: strings.ToLower(a.Scheme),
+ Host: strings.ToLower(host),
+ Port: a.Port,
+ Path: path,
+ }
+}
+
+// Key returns a string form of a, much like String() does, but this
+// method doesn't add anything default that wasn't in the original.
+func (a Address) Key() string {
+ res := ""
+ if a.Scheme != "" {
+ res += a.Scheme + "://"
+ }
+ if a.Host != "" {
+ res += a.Host
+ }
+ // insert port only if the original has its own explicit port
+ if a.Port != "" &&
+ len(a.Original) >= len(res) &&
+ strings.HasPrefix(a.Original[len(res):], ":"+a.Port) {
+ res += ":" + a.Port
+ }
+ if a.Path != "" {
+ res += a.Path
+ }
+ return res
+}
+
+const (
+ defaultPort = "2015"
+ caseSensitivePath = false // TODO: Used?
+)
diff --git a/caddyconfig/httpcaddyfile/addresses_test.go b/caddyconfig/httpcaddyfile/addresses_test.go
new file mode 100644
index 0000000..d6aa6f6
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/addresses_test.go
@@ -0,0 +1,166 @@
+package httpcaddyfile
+
+import (
+ "strings"
+ "testing"
+)
+
+func TestParseAddress(t *testing.T) {
+ for i, test := range []struct {
+ input string
+ scheme, host, port, path string
+ shouldErr bool
+ }{
+ {`localhost`, "", "localhost", "", "", false},
+ {`localhost:1234`, "", "localhost", "1234", "", false},
+ {`localhost:`, "", "localhost", "", "", false},
+ {`0.0.0.0`, "", "0.0.0.0", "", "", false},
+ {`127.0.0.1:1234`, "", "127.0.0.1", "1234", "", false},
+ {`:1234`, "", "", "1234", "", false},
+ {`[::1]`, "", "::1", "", "", false},
+ {`[::1]:1234`, "", "::1", "1234", "", false},
+ {`:`, "", "", "", "", false},
+ {`:http`, "", "", "", "", true},
+ {`:https`, "", "", "", "", true},
+ {`localhost:http`, "", "", "", "", true}, // using service name in port is verboten, as of Go 1.12.8
+ {`localhost:https`, "", "", "", "", true},
+ {`http://localhost:https`, "", "", "", "", true}, // conflict
+ {`http://localhost:http`, "", "", "", "", true}, // repeated scheme
+ {`host:https/path`, "", "", "", "", true},
+ {`http://localhost:443`, "", "", "", "", true}, // not conventional
+ {`https://localhost:80`, "", "", "", "", true}, // not conventional
+ {`http://localhost`, "http", "localhost", "80", "", false},
+ {`https://localhost`, "https", "localhost", "443", "", false},
+ {`http://127.0.0.1`, "http", "127.0.0.1", "80", "", false},
+ {`https://127.0.0.1`, "https", "127.0.0.1", "443", "", false},
+ {`http://[::1]`, "http", "::1", "80", "", false},
+ {`http://localhost:1234`, "http", "localhost", "1234", "", false},
+ {`https://127.0.0.1:1234`, "https", "127.0.0.1", "1234", "", false},
+ {`http://[::1]:1234`, "http", "::1", "1234", "", false},
+ {``, "", "", "", "", false},
+ {`::1`, "", "::1", "", "", true},
+ {`localhost::`, "", "localhost::", "", "", true},
+ {`#$%@`, "", "", "", "", true},
+ {`host/path`, "", "host", "", "/path", false},
+ {`http://host/`, "http", "host", "80", "/", false},
+ {`//asdf`, "", "asdf", "", "", false},
+ {`:1234/asdf`, "", "", "1234", "/asdf", false},
+ {`http://host/path`, "http", "host", "80", "/path", false},
+ {`https://host:443/path/foo`, "https", "host", "443", "/path/foo", false},
+ {`host:80/path`, "", "host", "80", "/path", false},
+ {`/path`, "", "", "", "/path", false},
+ } {
+ actual, err := ParseAddress(test.input)
+
+ if err != nil && !test.shouldErr {
+ t.Errorf("Test %d (%s): Expected no error, but had error: %v", i, test.input, err)
+ }
+ if err == nil && test.shouldErr {
+ t.Errorf("Test %d (%s): Expected error, but had none", i, test.input)
+ }
+
+ if !test.shouldErr && actual.Original != test.input {
+ t.Errorf("Test %d (%s): Expected original '%s', got '%s'", i, test.input, test.input, actual.Original)
+ }
+ if actual.Scheme != test.scheme {
+ t.Errorf("Test %d (%s): Expected scheme '%s', got '%s'", i, test.input, test.scheme, actual.Scheme)
+ }
+ if actual.Host != test.host {
+ t.Errorf("Test %d (%s): Expected host '%s', got '%s'", i, test.input, test.host, actual.Host)
+ }
+ if actual.Port != test.port {
+ t.Errorf("Test %d (%s): Expected port '%s', got '%s'", i, test.input, test.port, actual.Port)
+ }
+ if actual.Path != test.path {
+ t.Errorf("Test %d (%s): Expected path '%s', got '%s'", i, test.input, test.path, actual.Path)
+ }
+ }
+}
+
+func TestAddressString(t *testing.T) {
+ for i, test := range []struct {
+ addr Address
+ expected string
+ }{
+ {Address{Scheme: "http", Host: "host", Port: "1234", Path: "/path"}, "http://host:1234/path"},
+ {Address{Scheme: "", Host: "host", Port: "", Path: ""}, "http://host"},
+ {Address{Scheme: "", Host: "host", Port: "80", Path: ""}, "http://host"},
+ {Address{Scheme: "", Host: "host", Port: "443", Path: ""}, "https://host"},
+ {Address{Scheme: "https", Host: "host", Port: "443", Path: ""}, "https://host"},
+ {Address{Scheme: "https", Host: "host", Port: "", Path: ""}, "https://host"},
+ {Address{Scheme: "", Host: "host", Port: "80", Path: "/path"}, "http://host/path"},
+ {Address{Scheme: "http", Host: "", Port: "1234", Path: ""}, "http://:1234"},
+ {Address{Scheme: "", Host: "", Port: "", Path: ""}, ""},
+ } {
+ actual := test.addr.String()
+ if actual != test.expected {
+ t.Errorf("Test %d: expected '%s' but got '%s'", i, test.expected, actual)
+ }
+ }
+}
+
+func TestKeyNormalization(t *testing.T) {
+ testCases := []struct {
+ input string
+ expect string
+ }{
+ {
+ input: "http://host:1234/path",
+ expect: "http://host:1234/path",
+ },
+ {
+ input: "HTTP://A/ABCDEF",
+ expect: "http://a/ABCDEF",
+ },
+ {
+ input: "A/ABCDEF",
+ expect: "a/ABCDEF",
+ },
+ {
+ input: "A:2015/Path",
+ expect: "a:2015/Path",
+ },
+ {
+ input: ":80",
+ expect: ":80",
+ },
+ {
+ input: ":443",
+ expect: ":443",
+ },
+ {
+ input: ":1234",
+ expect: ":1234",
+ },
+ {
+ input: "",
+ expect: "",
+ },
+ {
+ input: ":",
+ expect: "",
+ },
+ {
+ input: "[::]",
+ expect: "::",
+ },
+ }
+ for i, tc := range testCases {
+ addr, err := ParseAddress(tc.input)
+ if err != nil {
+ t.Errorf("Test %d: Parsing address '%s': %v", i, tc.input, err)
+ continue
+ }
+ expect := tc.expect
+ if !caseSensitivePath {
+ // every other part of the address should be lowercased when normalized,
+ // so simply lower-case the whole thing to do case-insensitive comparison
+ // of the path as well
+ expect = strings.ToLower(expect)
+ }
+ if actual := addr.Normalize().Key(); actual != expect {
+ t.Errorf("Test %d: Normalized key for address '%s' was '%s' but expected '%s'", i, tc.input, actual, expect)
+ }
+
+ }
+}
diff --git a/caddyconfig/httpcaddyfile/builtins.go b/caddyconfig/httpcaddyfile/builtins.go
new file mode 100644
index 0000000..0fdfcd5
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/builtins.go
@@ -0,0 +1,255 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+ "fmt"
+ "html"
+ "net/http"
+ "reflect"
+
+ "github.com/caddyserver/caddy/caddyconfig"
+ "github.com/caddyserver/caddy/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+)
+
+func init() {
+ RegisterDirective("bind", parseBind)
+ RegisterDirective("root", parseRoot)
+ RegisterDirective("tls", parseTLS)
+ RegisterHandlerDirective("redir", parseRedir)
+}
+
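+// parseBind parses the bind directive, collecting the listener
+// host addresses given as its arguments.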
+func parseBind(h Helper) ([]ConfigValue, error) {
+ var lnHosts []string
+ for h.Next() {
+ lnHosts = append(lnHosts, h.RemainingArgs()...)
+ }
+ return h.NewBindAddresses(lnHosts), nil
+}
+
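+// parseRoot parses the root directive, emitting a vars route that sets
+// the "root" variable, optionally scoped to a matcher token.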
+func parseRoot(h Helper) ([]ConfigValue, error) {
+ if !h.Next() {
+ return nil, h.ArgErr()
+ }
+
+ matcherSet, ok, err := h.MatcherToken()
+ if err != nil {
+ return nil, err
+ }
+ if !ok {
+ // no matcher token; oops
+ h.Dispenser.Prev()
+ }
+
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ root := h.Val()
+ if h.NextArg() {
+ return nil, h.ArgErr()
+ }
+
+ varsHandler := caddyhttp.VarsMiddleware{"root": root}
+ route := caddyhttp.Route{
+ HandlersRaw: []json.RawMessage{
+ caddyconfig.JSONModuleObject(varsHandler, "handler", "vars", nil),
+ },
+ }
+ if matcherSet != nil {
+ route.MatcherSetsRaw = []map[string]json.RawMessage{matcherSet}
+ }
+
+ return h.NewVarsRoute(route), nil
+}
+
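+// parseTLS parses the tls directive. The first line may be "off", an ACME
+// account email, or a certificate and key file pair; the optional block
+// configures protocols, ciphers, curves, alpn, certificate folders to
+// load, and the ACME CA endpoint.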
+func parseTLS(h Helper) ([]ConfigValue, error) {
+ var configVals []ConfigValue
+
+ cp := new(caddytls.ConnectionPolicy)
+ var fileLoader caddytls.FileLoader
+ var folderLoader caddytls.FolderLoader
+ var mgr caddytls.ACMEManagerMaker
+ var off bool
+
+ for h.Next() {
+ // the first line is either "off", an ACME account email,
+ // or a certificate/key file pair
+ firstLine := h.RemainingArgs()
+ switch len(firstLine) {
+ case 0:
+ case 1:
+ if firstLine[0] == "off" {
+ off = true
+ } else {
+ mgr.Email = firstLine[0]
+ }
+ case 2:
+ fileLoader = append(fileLoader, caddytls.CertKeyFilePair{
+ Certificate: firstLine[0],
+ Key: firstLine[1],
+ // TODO: add tags, for enterprise module's certificate selection
+ })
+ default:
+ return nil, h.ArgErr()
+ }
+
+ var hasBlock bool
+ for h.NextBlock() {
+ hasBlock = true
+
+ switch h.Val() {
+
+ // connection policy
+ case "protocols":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.SyntaxErr("one or two protocols")
+ }
+ if len(args) > 0 {
+ if _, ok := caddytls.SupportedProtocols[args[0]]; !ok {
+ return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[0])
+ }
+ cp.ProtocolMin = args[0]
+ }
+ if len(args) > 1 {
+ if _, ok := caddytls.SupportedProtocols[args[1]]; !ok {
+ return nil, h.Errf("Wrong protocol name or protocol not supported: '%s'", args[1])
+ }
+ cp.ProtocolMax = args[1]
+ }
+ case "ciphers":
+ for h.NextArg() {
+ if _, ok := caddytls.SupportedCipherSuites[h.Val()]; !ok {
+ return nil, h.Errf("Wrong cipher suite name or cipher suite not supported: '%s'", h.Val())
+ }
+ cp.CipherSuites = append(cp.CipherSuites, h.Val())
+ }
+ case "curves":
+ for h.NextArg() {
+ if _, ok := caddytls.SupportedCurves[h.Val()]; !ok {
+ return nil, h.Errf("Wrong curve name or curve not supported: '%s'", h.Val())
+ }
+ cp.Curves = append(cp.Curves, h.Val())
+ }
+ case "alpn":
+ args := h.RemainingArgs()
+ if len(args) == 0 {
+ return nil, h.ArgErr()
+ }
+ cp.ALPN = args
+
+ // certificate folder loader
+ case "load":
+ folderLoader = append(folderLoader, h.RemainingArgs()...)
+
+ // automation policy
+ case "ca":
+ arg := h.RemainingArgs()
+ if len(arg) != 1 {
+ return nil, h.ArgErr()
+ }
+ mgr.CA = arg[0]
+
+ // TODO: other properties for automation manager
+ }
+ }
+
+ // a naked tls directive is not allowed
+ if len(firstLine) == 0 && !hasBlock {
+ return nil, h.ArgErr()
+ }
+ }
+
+ // connection policy
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.connection_policy",
+ Value: cp,
+ })
+
+ // certificate loaders
+ if len(fileLoader) > 0 {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.certificate_loader",
+ Value: fileLoader,
+ })
+ }
+ if len(folderLoader) > 0 {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.certificate_loader",
+ Value: folderLoader,
+ })
+ }
+
+ // automation policy
+ if off {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.off",
+ Value: true,
+ })
+ } else if !reflect.DeepEqual(mgr, caddytls.ACMEManagerMaker{}) {
+ configVals = append(configVals, ConfigValue{
+ Class: "tls.automation_manager",
+ Value: mgr,
+ })
+ }
+
+ return configVals, nil
+}
+
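+// parseRedir parses the redir directive. The first argument is the
+// redirect target; an optional second argument is a status code,
+// "permanent" (301), "temporary" (307, the default), or "meta" to
+// serve an HTML meta/JS redirect page.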
+func parseRedir(h Helper) (caddyhttp.MiddlewareHandler, error) {
+ if !h.Next() {
+ return nil, h.ArgErr()
+ }
+
+ if !h.NextArg() {
+ return nil, h.ArgErr()
+ }
+ to := h.Val()
+
+ var code string
+ if h.NextArg() {
+ code = h.Val()
+ }
+ if code == "permanent" {
+ code = "301"
+ }
+ if code == "temporary" || code == "" {
+ code = "307"
+ }
+ var body string
+ if code == "meta" {
+ // Script tag comes first since that will better imitate a redirect in the browser's
+ // history, but the meta tag is a fallback for most non-JS clients.
+ const metaRedir = `<!DOCTYPE html>
+<html>
+ <head>
+ <title>Redirecting...</title>
+ <script>window.location.replace("%s");</script>
+ <meta http-equiv="refresh" content="0; URL='%s'">
+ </head>
+ <body>Redirecting to <a href="%s">%s</a>...</body>
+</html>
+`
+ safeTo := html.EscapeString(to)
+ body = fmt.Sprintf(metaRedir, safeTo, safeTo, safeTo, safeTo)
+ }
+
+ return caddyhttp.StaticResponse{
+ StatusCode: caddyhttp.WeakString(code),
+ Headers: http.Header{"Location": []string{to}},
+ Body: body,
+ }, nil
+}
diff --git a/caddyconfig/httpcaddyfile/directives.go b/caddyconfig/httpcaddyfile/directives.go
new file mode 100644
index 0000000..526ac87
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/directives.go
@@ -0,0 +1,182 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
+// defaultDirectiveOrder specifies the order
+// to apply directives in HTTP routes.
+// TODO: finish the ability to customize this
+var defaultDirectiveOrder = []string{
+ "rewrite",
+ "try_files",
+ "headers",
+ "encode",
+ "templates",
+ "redir",
+ "static_response", // TODO: "reply" or "respond"?
+ "reverse_proxy",
+ "file_server",
+}
+
+// RegisterDirective registers a unique directive dir with an
+// associated unmarshaling (setup) function. When directive dir
+// is encountered in a Caddyfile, setupFunc will be called to
+// unmarshal its tokens.
+func RegisterDirective(dir string, setupFunc UnmarshalFunc) {
+ if _, ok := registeredDirectives[dir]; ok {
+ panic("directive " + dir + " already registered")
+ }
+ registeredDirectives[dir] = setupFunc
+}
+
+// RegisterHandlerDirective is like RegisterDirective, but for
+// directives which specifically output only an HTTP handler.
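+// For example (an illustrative sketch; the "gizmo" directive and
+// parseGizmo are hypothetical):
+//
+//     func init() {
+//         RegisterHandlerDirective("gizmo", parseGizmo)
+//     }
+//
+//     func parseGizmo(h Helper) (caddyhttp.MiddlewareHandler, error) {
+//         // consume tokens from h and return the configured handler
+//     }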
+func RegisterHandlerDirective(dir string, setupFunc UnmarshalHandlerFunc) {
+ RegisterDirective(dir, func(h Helper) ([]ConfigValue, error) {
+ if !h.Next() {
+ return nil, h.ArgErr()
+ }
+
+ matcherSet, ok, err := h.MatcherToken()
+ if err != nil {
+ return nil, err
+ }
+ if ok {
+ h.Dispenser.Delete() // strip matcher token
+ }
+
+ h.Dispenser.Reset() // pretend this lookahead never happened
+ val, err := setupFunc(h)
+ if err != nil {
+ return nil, err
+ }
+
+ return h.NewRoute(matcherSet, val), nil
+ })
+}
+
+// Helper is a type which helps setup a value from
+// Caddyfile tokens.
+type Helper struct {
+ *caddyfile.Dispenser
+ warnings *[]caddyconfig.Warning
+ matcherDefs map[string]map[string]json.RawMessage
+}
+
+// JSON converts val into JSON. Any errors are added to warnings.
+func (h Helper) JSON(val interface{}) json.RawMessage {
+ return caddyconfig.JSON(val, h.warnings)
+}
+
+// MatcherToken assumes the current token is (possibly) a matcher, and
+// if so, returns the matcher set along with a true value. If the current
+// token is not a matcher, nil and false are returned. Note that a true
+// value may be returned with a nil matcher set if it is a catch-all.
+func (h Helper) MatcherToken() (map[string]json.RawMessage, bool, error) {
+ if !h.NextArg() {
+ return nil, false, nil
+ }
+ return matcherSetFromMatcherToken(h.Dispenser.Token(), h.matcherDefs, h.warnings)
+}
+
+// NewRoute returns config values relevant to creating a new HTTP route.
+func (h Helper) NewRoute(matcherSet map[string]json.RawMessage,
+ handler caddyhttp.MiddlewareHandler) []ConfigValue {
+ mod, err := caddy.GetModule(caddy.GetModuleName(handler))
+ if err != nil && h.warnings != nil {
+ // surface the module lookup error as a warning rather than dropping it
+ *h.warnings = append(*h.warnings, caddyconfig.Warning{Message: err.Error()})
+ }
+ var matcherSetsRaw []map[string]json.RawMessage
+ if matcherSet != nil {
+ matcherSetsRaw = append(matcherSetsRaw, matcherSet)
+ }
+ return []ConfigValue{
+ {
+ Class: "route",
+ Value: caddyhttp.Route{
+ MatcherSetsRaw: matcherSetsRaw,
+ HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(handler, "handler", mod.ID(), h.warnings)},
+ },
+ },
+ }
+}
+
+// NewBindAddresses returns config values relevant to adding
+// listener bind addresses to the config.
+func (h Helper) NewBindAddresses(addrs []string) []ConfigValue {
+ return []ConfigValue{{Class: "bind", Value: addrs}}
+}
+
+// NewVarsRoute returns config values relevant to adding a
+// "vars" wrapper route to the config.
+func (h Helper) NewVarsRoute(route caddyhttp.Route) []ConfigValue {
+ return []ConfigValue{{Class: "var", Value: route}}
+}
+
+// ConfigValue represents a value to be added to the final
+// configuration, or a value to be consulted when building
+// the final configuration.
+type ConfigValue struct {
+ // The kind of value this is. As the config is
+ // being built, the adapter will look in the
+ // "pile" for values belonging to a certain
+ // class when it is setting up a certain part
+ // of the config. The associated value will be
+ // type-asserted and placed accordingly.
+ Class string
+
+ // The value to be used when building the config.
+ // Generally its type is associated with the
+ // name of the Class.
+ Value interface{}
+
+ directive string
+}
+
+// serverBlock pairs a Caddyfile server block
+// with a "pile" of config values, keyed by class
+// name.
+type serverBlock struct {
+ block caddyfile.ServerBlock
+ pile map[string][]ConfigValue // config values obtained from directives
+}
+
+type (
+ // UnmarshalFunc is a function which can unmarshal Caddyfile
+ // tokens into zero or more config values using a Helper type.
+ // These are passed in a call to RegisterDirective.
+ UnmarshalFunc func(h Helper) ([]ConfigValue, error)
+
+ // UnmarshalHandlerFunc is like UnmarshalFunc, except the
+ // output of the unmarshaling is an HTTP handler. This
+ // function does not need to deal with HTTP request matching
+ // which is abstracted away. Since writing HTTP handlers
+ // with Caddyfile support is very common, this is a more
+ // convenient way to add a handler to the chain since a lot
+ // of the details common to HTTP handlers are taken care of
+ // for you. These are passed to a call to
+ // RegisterHandlerDirective.
+ UnmarshalHandlerFunc func(h Helper) (caddyhttp.MiddlewareHandler, error)
+)
+
+var registeredDirectives = make(map[string]UnmarshalFunc)
diff --git a/caddyconfig/httpcaddyfile/handlers.go b/caddyconfig/httpcaddyfile/handlers.go
new file mode 100644
index 0000000..9a29e97
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/handlers.go
@@ -0,0 +1,56 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+)
+
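+// parseMatcherDefinitions parses the tokens of a matcher directive into
+// named matcher sets: a map keyed by definition name, then by matcher
+// module name, with each matcher's configuration JSON-encoded.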
+func (st *ServerType) parseMatcherDefinitions(d *caddyfile.Dispenser) (map[string]map[string]json.RawMessage, error) {
+ matchers := make(map[string]map[string]json.RawMessage)
+ for d.Next() {
+ definitionName := d.Val()
+ for d.NextBlock() {
+ matcherName := d.Val()
+ mod, err := caddy.GetModule("http.matchers." + matcherName)
+ if err != nil {
+ return nil, fmt.Errorf("getting matcher module '%s': %v", matcherName, err)
+ }
+ unm, ok := mod.New().(caddyfile.Unmarshaler)
+ if !ok {
+ return nil, fmt.Errorf("matcher module '%s' is not a Caddyfile unmarshaler", matcherName)
+ }
+ err = unm.UnmarshalCaddyfile(d.NewFromNextTokens())
+ if err != nil {
+ return nil, err
+ }
+ rm, ok := unm.(caddyhttp.RequestMatcher)
+ if !ok {
+ return nil, fmt.Errorf("matcher module '%s' is not a request matcher", matcherName)
+ }
+ if _, ok := matchers[definitionName]; !ok {
+ matchers[definitionName] = make(map[string]json.RawMessage)
+ }
+ matchers[definitionName][matcherName] = caddyconfig.JSON(rm, nil)
+ }
+ }
+ return matchers, nil
+}
diff --git a/caddyconfig/httpcaddyfile/httptype.go b/caddyconfig/httpcaddyfile/httptype.go
new file mode 100644
index 0000000..42c1be5
--- /dev/null
+++ b/caddyconfig/httpcaddyfile/httptype.go
@@ -0,0 +1,519 @@
+// Copyright 2015 Matthew Holt and The Caddy Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httpcaddyfile
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/caddyserver/caddy/v2"
+ "github.com/caddyserver/caddy/v2/caddyconfig"
+ "github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
+ "github.com/caddyserver/caddy/v2/modules/caddyhttp"
+ "github.com/caddyserver/caddy/v2/modules/caddytls"
+ "github.com/mholt/certmagic"
+)
+
+func init() {
+ caddyconfig.RegisterAdapter("caddyfile", caddyfile.Adapter{ServerType: ServerType{}})
+}
+
+// ServerType can set up a config from an HTTP Caddyfile.
+type ServerType struct{}
+
+// Setup makes a config from the tokens.
+func (st ServerType) Setup(originalServerBlocks []caddyfile.ServerBlock,
+ options map[string]string) (*caddy.Config, []caddyconfig.Warning, error) {
+ var warnings []caddyconfig.Warning
+
+ var serverBlocks []serverBlock
+ for _, sblock := range originalServerBlocks {
+ serverBlocks = append(serverBlocks, serverBlock{
+ block: sblock,
+ pile: make(map[string][]ConfigValue),
+ })
+ }
+
+ for _, sb := range serverBlocks {
+ // replace shorthand placeholders (which are
+ // convenient when writing a Caddyfile) with
+ // their actual placeholder identifiers or
+ // variable names
+ replacer := strings.NewReplacer(
+ "{uri}", "{http.request.uri}",
+ "{path}", "{http.request.uri.path}",
+ "{host}", "{http.request.host}",
+ "{hostport}", "{http.request.hostport}",
+ "{method}", "{http.request.method}",
+ "{scheme}", "{http.request.scheme}",
+ "{file}", "{http.request.uri.path.file}",
+ "{dir}", "{http.request.uri.path.dir}",
+ "{query}", "{http.request.uri.query}",
+ )
+ for _, segment := range sb.block.Segments {
+ for i := 0; i < len(segment); i++ {
+ segment[i].Text = replacer.Replace(segment[i].Text)
+ }
+ }
+
+ // extract matcher definitions
+ d := sb.block.DispenseDirective("matcher")
+ matcherDefs, err := st.parseMatcherDefinitions(d)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ for _, segment := range sb.block.Segments {
+ dir := segment.Directive()
+ if dir == "matcher" {
+ // TODO: This is a special case because we pre-processed it; handle this better
+ continue
+ }
+ if dirFunc, ok := registeredDirectives[dir]; ok {
+ results, err := dirFunc(Helper{
+ Dispenser: segment.NewDispenser(),
+ warnings: &warnings,
+ matcherDefs: matcherDefs,
+ })
+ if err != nil {
+ return nil, warnings, fmt.Errorf("parsing caddyfile tokens for '%s': %v", dir, err)
+ }
+ for _, result := range results {
+ result.directive = dir
+ sb.pile[result.Class] = append(sb.pile[result.Class], result)
+ }
+ } else {
+ tkn := segment[0]
+ return nil, warnings, fmt.Errorf("%s:%d: unrecognized directive: %s", tkn.File, tkn.Line, dir)
+ }
+ }
+ }
+
+ // map
+ sbmap, err := st.mapAddressToServerBlocks(serverBlocks)
+ if err != nil {
+ return nil, warnings, err
+ }
+
+ // reduce
+ pairings := st.consolidateAddrMappings(sbmap)
+
+ // each pairing of listener addresses to list of server
+ // blocks is basically a server definition
+ servers, err := st.serversFromPairings(pairings, &warnings)
+ if err != nil {
+ return nil, warnings, err
+ }
+
+ // now that each server is configured, make the HTTP app
+ httpApp := caddyhttp.App{
+ HTTPPort: tryInt(options["http-port"], &warnings),
+ HTTPSPort: tryInt(options["https-port"], &warnings),
+ Servers: servers,
+ }
+
+ // now for the TLS app! (TODO: refactor into own func)
+ tlsApp := caddytls.TLS{Certificates: make(map[string]json.RawMessage)}
+ for _, p := range pairings {
+ for _, sblock := range p.serverBlocks {
+ // tls automation policies
+ if mmVals, ok := sblock.pile["tls.automation_manager"]; ok {
+ for _, mmVal := range mmVals {
+ mm := mmVal.Value.(caddytls.ManagerMaker)
+ sblockHosts, err := st.autoHTTPSHosts(sblock)
+ if err != nil {
+ return nil, warnings, err
+ }
+ tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, caddytls.AutomationPolicy{
+ Hosts: sblockHosts,
+ ManagementRaw: caddyconfig.JSONModuleObject(mm, "module", mm.(caddy.Module).CaddyModule().ID(), &warnings),
+ })
+ }
+ }
+
+ // tls certificate loaders
+ if clVals, ok := sblock.pile["tls.certificate_loader"]; ok {
+ for _, clVal := range clVals {
+ loader := clVal.Value.(caddytls.CertificateLoader)
+ loaderName := caddy.GetModuleName(loader)
+ tlsApp.Certificates[loaderName] = caddyconfig.JSON(loader, &warnings)
+ }
+ }
+ }
+ }
+ // consolidate automation policies that are the exact same
+ tlsApp.Automation.Policies = consolidateAutomationPolicies(tlsApp.Automation.Policies)
+
+ // annnd the top-level config, then we're done!
+ cfg := &caddy.Config{AppsRaw: make(map[string]json.RawMessage)}
+ if !reflect.DeepEqual(httpApp, caddyhttp.App{}) {
+ cfg.AppsRaw["http"] = caddyconfig.JSON(httpApp, &warnings)
+ }
+ if !reflect.DeepEqual(tlsApp, caddytls.TLS{}) {
+ cfg.AppsRaw["tls"] = caddyconfig.JSON(tlsApp, &warnings)
+ }
+
+ return cfg, warnings, nil
+}
+
+// hostsFromServerBlockKeys returns a list of all the
+// hostnames found in the keys of the server block sb.
+// The list may not be in a consistent order.
+func (st *ServerType) hostsFromServerBlockKeys(sb caddyfile.ServerBlock) ([]string, error) {
+ // first get each unique hostname
+ hostMap := make(map[string]struct{})
+ for _, sblockKey := range sb.Keys {
+ addr, err := ParseAddress(sblockKey)
+ if err != nil {
+ return nil, fmt.Errorf("parsing server block key: %v", err)
+ }
+ addr = addr.Normalize()
+ hostMap[addr.Host] = struct{}{}
+ }
+
+ // convert map to slice
+ sblockHosts := make([]string, 0, len(hostMap))
+ for host := range hostMap {
+ sblockHosts = append(sblockHosts, host)
+ }
+
+ return sblockHosts, nil
+}
+
+// serversFromPairings creates the servers for each pairing of addresses
+// to server blocks. Each pairing is essentially a server definition.
+func (st *ServerType) serversFromPairings(pairings []sbAddrAssociation, warnings *[]caddyconfig.Warning) (map[string]*caddyhttp.Server, error) {
+ servers := make(map[string]*caddyhttp.Server)
+
+ for i, p := range pairings {
+ srv := &caddyhttp.Server{
+ Listen: p.addresses,
+ }
+
+ for _, sblock := range p.serverBlocks {
+ matcherSetsEnc, err := st.compileEncodedMatcherSets(sblock.block)
+ if err != nil {
+ return nil, fmt.Errorf("server block %v: compiling matcher sets: %v", sblock.block.Keys, err)
+ }
+
+ // if there are user-defined variables, then siteVarSubroute will
+ // wrap the handlerSubroute; otherwise handlerSubroute will be the
+ // site's primary subroute.
+ siteVarSubroute, handlerSubroute := new(caddyhttp.Subroute), new(caddyhttp.Subroute)
+
+ // tls: connection policies and toggle auto HTTPS
+
+ autoHTTPSQualifiedHosts, err := st.autoHTTPSHosts(sblock)
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := sblock.pile["tls.off"]; ok {
+ // tls off: disable TLS (and automatic HTTPS) for server block's names
+ if srv.AutoHTTPS == nil {
+ srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
+ }
+ srv.AutoHTTPS.Skip = append(srv.AutoHTTPS.Skip, autoHTTPSQualifiedHosts...)
+ } else if cpVals, ok := sblock.pile["tls.connection_policy"]; ok {
+ // tls connection policies
+ for _, cpVal := range cpVals {
+ cp := cpVal.Value.(*caddytls.ConnectionPolicy)
+ // only create if there is a non-empty policy
+ if !reflect.DeepEqual(cp, new(caddytls.ConnectionPolicy)) {
+ // make sure the policy covers all hostnames from the block
+ hosts, err := st.hostsFromServerBlockKeys(sblock.block)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: are matchers needed if every hostname of the config is matched?
+ cp.Matchers = map[string]json.RawMessage{
+ "sni": caddyconfig.JSON(hosts, warnings), // make sure to match all hosts, not just auto-HTTPS-qualified ones
+ }
+ srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
+ }
+ }
+ // TODO: consolidate equal conn policies
+ }
+
+ // vars: special routes that will have to wrap the normal handlers
+ // so that these variables can be used across their matchers too
+ for _, cfgVal := range sblock.pile["var"] {
+ siteVarSubroute.Routes = append(siteVarSubroute.Routes, cfgVal.Value.(caddyhttp.Route))
+ }
+
+ // set up each handler directive
+ dirRoutes := sblock.pile["route"]
+ // TODO: The ordering here depends on... if there is a list of
+ // directives to use, then sort by that, otherwise just use in
+ // the order they appear in the slice (which is the order they
+ // appeared in the Caddyfile)
+ sortByList := true
+ if sortByList {
+ dirPositions := make(map[string]int)
+ for i, dir := range defaultDirectiveOrder {
+ dirPositions[dir] = i
+ }
+ sort.SliceStable(dirRoutes, func(i, j int) bool {
+ iDir, jDir := dirRoutes[i].directive, dirRoutes[j].directive
+ return dirPositions[iDir] < dirPositions[jDir]
+ })
+ }
+ for _, r := range dirRoutes {
+ handlerSubroute.Routes = append(handlerSubroute.Routes, r.Value.(caddyhttp.Route))
+ }
+
+ // the route that contains the site's handlers will
+ // be assumed to be the sub-route for this site...
+ siteSubroute := handlerSubroute
+
+ // ... unless, of course, there are variables that might
+ // be used by the site's matchers or handlers, in which
+ // case we need to nest the handlers in a sub-sub-route,
+ // and the variables go in the sub-route so the variables
+ // get evaluated first
+ if len(siteVarSubroute.Routes) > 0 {
+ subSubRoute := caddyhttp.Subroute{Routes: siteSubroute.Routes}
+ siteSubroute.Routes = append(
+ siteVarSubroute.Routes,
+ caddyhttp.Route{
+ HandlersRaw: []json.RawMessage{
+ caddyconfig.JSONModuleObject(subSubRoute, "handler", "subroute", warnings),
+ },
+ },
+ )
+ }
+
+ siteSubroute.Routes = consolidateRoutes(siteSubroute.Routes)
+
+ srv.Routes = append(srv.Routes, caddyhttp.Route{
+ MatcherSetsRaw: matcherSetsEnc,
+ HandlersRaw: []json.RawMessage{
+ caddyconfig.JSONModuleObject(siteSubroute, "handler", "subroute", warnings),
+ },
+ })
+ }
+
+ srv.Routes = consolidateRoutes(srv.Routes)
+
+ servers[fmt.Sprintf("srv%d", i)] = srv
+ }
+
+ return servers, nil
+}
+
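+// autoHTTPSHosts returns the hostnames from sb's keys
+// which qualify for automatic HTTPS.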
+func (st ServerType) autoHTTPSHosts(sb serverBlock) ([]string, error) {
+ // get the hosts for this server block...
+ hosts, err := st.hostsFromServerBlockKeys(sb.block)
+ if err != nil {
+ return nil, err
+ }
+ // ...and of those, which ones qualify for auto HTTPS
+ var autoHTTPSQualifiedHosts []string
+ for _, h := range hosts {
+ if certmagic.HostQualifies(h) {
+ autoHTTPSQualifiedHosts = append(autoHTTPSQualifiedHosts, h)
+ }
+ }
+ return autoHTTPSQualifiedHosts, nil
+}
+
+// consolidateRoutes combines routes with the same properties
+// (same matchers, same Terminal and Group settings) for a
+// cleaner overall output.
+func consolidateRoutes(routes caddyhttp.RouteList) caddyhttp.RouteList {
+ for i := 0; i < len(routes)-1; i++ {
+ if reflect.DeepEqual(routes[i].MatcherSetsRaw, routes[i+1].MatcherSetsRaw) &&
+ routes[i].Terminal == routes[i+1].Terminal &&
+ routes[i].Group == routes[i+1].Group {
+ // keep the handlers in the same order, then splice out repetitive route
+ routes[i].HandlersRaw = append(routes[i].HandlersRaw, routes[i+1].HandlersRaw...)
+ routes = append(routes[:i+1], routes[i+2:]...)
+ i--
+ }
+ }
+ return routes
+}
+
+// consolidateAutomationPolicies combines automation policies that are the same,
+// for a cleaner overall output.
+func consolidateAutomationPolicies(aps []caddytls.AutomationPolicy) []caddytls.AutomationPolicy {
+ for i := 0; i < len(aps); i++ {
+ for j := 0; j < len(aps); j++ {
+ if j == i {
+ continue
+ }
+ if reflect.DeepEqual(aps[i].ManagementRaw, aps[j].ManagementRaw) {
+ aps[i].Hosts = append(aps[i].Hosts, aps[j].Hosts...)
+ }
+ aps = append(aps[:j], aps[j+1:]...)
+ i--
+ break
+ }
+ }
+ return aps
+}
+
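+// matcherSetFromMatcherToken interprets tkn as a matcher token if possible.
+// Three forms are recognized: "*" (match all; a nil matcher set), a token
+// beginning with "/" (a single path matcher), and "match:name" (a reference
+// to a matcher defined by the matcher directive).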
+func matcherSetFromMatcherToken(
+ tkn caddyfile.Token,
+ matcherDefs map[string]map[string]json.RawMessage,
+ warnings *[]caddyconfig.Warning,
+) (map[string]json.RawMessage, bool, error) {
+ // matcher tokens can be wildcards, simple path matchers,
+ // or refer to a pre-defined matcher by some name
+ if tkn.Text == "*" {
+ // match all requests == no matchers, so nothing to do
+ return nil, true, nil
+ } else if strings.HasPrefix(tkn.Text, "/") {
+ // convenient way to specify a single path match
+ return map[string]json.RawMessage{
+ "path": caddyconfig.JSON(caddyhttp.MatchPath{tkn.Text}, warnings),
+ }, true, nil
+ } else if strings.HasPrefix(tkn.Text, "match:") {
+ // pre-defined matcher
+ matcherName := strings.TrimPrefix(tkn.Text, "match:")
+ m, ok := matcherDefs[matcherName]
+ if !ok {
+ return nil, false, fmt.Errorf("unrecognized matcher name: %s", matcherName)
+ }
+ return m, true, nil
+ }
+
+ return nil, false, nil
+}
+
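+// compileEncodedMatcherSets compiles a server block's keys into
+// host and path matcher sets and JSON-encodes them. Keys sharing
+// the same path are grouped into one matcher pair; for example,
+// with the hypothetical keys "a.example.com" and "b.example.com"
+// (no paths), the result is a single encoded matcher set
+// {"host": ["a.example.com", "b.example.com"]}.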
+func (st ServerType) compileEncodedMatcherSets(sblock caddyfile.ServerBlock) ([]map[string]json.RawMessage, error) {
+ type hostPathPair struct {
+ hostm caddyhttp.MatchHost
+ pathm caddyhttp.MatchPath
+ }
+
+ // keep routes with common host and path matchers together
+ var matcherPairs []*hostPathPair
+
+ for _, key := range sblock.Keys {
+ addr, err := ParseAddress(key)
+ if err != nil {
+ return nil, fmt.Errorf("server block %v: parsing and standardizing address '%s': %v", sblock.Keys, key, err)
+ }
+ addr = addr.Normalize()
+
+ // choose a matcher pair that should be shared by this
+ // server block; if none exists yet, create one
+ var chosenMatcherPair *hostPathPair
+ for _, mp := range matcherPairs {
+ if (len(mp.pathm) == 0 && addr.Path == "") ||
+ (len(mp.pathm) == 1 && mp.pathm[0] == addr.Path) {
+ chosenMatcherPair = mp
+ break
+ }
+ }
+ if chosenMatcherPair == nil {
+ chosenMatcherPair = new(hostPathPair)
+ if addr.Path != "" {
+ chosenMatcherPair.pathm = []string{addr.Path}
+ }
+ matcherPairs = append(matcherPairs, chosenMatcherPair)
+ }
+
+ // add this key's host to the matcher pair's
+ // host list, if not already present
+ if addr.Host != "" {
+ var found bool
+ for _, h := range chosenMatcherPair.hostm {
+ if h == addr.Host {
+ found = true
+ break
+ }
+ }
+ if !found {
+ chosenMatcherPair.hostm = append(chosenMatcherPair.hostm, addr.Host)
+ }
+ }
+ }
+
+ // iterate each pairing of host and path matchers and
+ // put them into a map for JSON encoding
+ var matcherSets []map[string]caddyhttp.RequestMatcher
+ for _, mp := range matcherPairs {
+ matcherSet := make(map[string]caddyhttp.RequestMatcher)
+ if len(mp.hostm) > 0 {
+ matcherSet["host"] = mp.hostm
+ }
+ if len(mp.pathm) > 0 {
+ matcherSet["path"] = mp.pathm
+ }
+ if len(matcherSet) > 0 {
+ matcherSets = append(matcherSets, matcherSet)
+ }
+ }
+
+ // finally, encode each of the matcher sets
+ var matcherSetsEnc []map[string]json.RawMessage
+ for _, ms := range matcherSets {
+ msEncoded, err := encodeMatcherSet(ms)
+ if err != nil {
+ return nil, fmt.Errorf("server block %v: %v", sblock.Keys, err)
+ }
+ matcherSetsEnc = append(matcherSetsEnc, msEncoded)
+ }
+
+ return matcherSetsEnc, nil
+}
+
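+// encodeMatcherSet JSON-encodes each matcher in the given set,
+// keyed by its matcher name.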
+func encodeMatcherSet(matchers map[string]caddyhttp.RequestMatcher) (map[string]json.RawMessage, error) {
+ msEncoded := make(map[string]json.RawMessage)
+ for matcherName, val := range matchers {
+ jsonBytes, err := json.Marshal(val)
+ if err != nil {
+ return nil, fmt.Errorf("marshaling matcher set %#v: %v", matchers, err)
+ }
+ msEncoded[matcherName] = jsonBytes
+ }
+ return msEncoded, nil
+}
+
+// tryInt tries to convert str to an integer. If it fails, it downgrades
+// the error to a warning and returns 0.
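+// For example, tryInt("80", &warnings) returns 80, while
+// tryInt("eighty", &warnings) returns 0 and appends a warning.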
+func tryInt(str string, warnings *[]caddyconfig.Warning) int {
+ if str == "" {
+ return 0
+ }
+ val, err := strconv.Atoi(str)
+ if err != nil && warnings != nil {
+ *warnings = append(*warnings, caddyconfig.Warning{Message: err.Error()})
+ }
+ return val
+}
+
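+// matcherSetAndTokens pairs a matcher set with the tokens
+// it applies to.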
+type matcherSetAndTokens struct {
+ matcherSet map[string]json.RawMessage
+ tokens []caddyfile.Token
+}
+
+// sbAddrAssociation is a mapping from a list of
+// addresses to a list of server blocks that are
+// served on those addresses.
+type sbAddrAssociation struct {
+ addresses []string
+ serverBlocks []serverBlock
+}
+
+// Interface guard
+var _ caddyfile.ServerType = (*ServerType)(nil)