author    WeidiDeng <weidi_deng@icloud.com>  2023-05-26 03:05:00 +0800
committer GitHub <noreply@github.com>        2023-05-25 13:05:00 -0600
commit    9cde71552576dcc724eff38dee42879c927b72ec (patch)
tree      5af6bec905fadbc7ef0f9d8be610f7ffe3758682
parent    942fbb37ec46f1907e57a04c44ca790bd47ed52c (diff)
caddyfile: Track import name instead of modifying filename (#5540)
* Merge branch 'master' into import_file_stack
* remove space in log key
-rw-r--r--  caddyconfig/caddyfile/dispenser.go           5
-rw-r--r--  caddyconfig/caddyfile/importargs.go          8
-rw-r--r--  caddyconfig/caddyfile/lexer.go              19
-rw-r--r--  caddyconfig/caddyfile/parse.go              10
-rw-r--r--  caddyconfig/httpcaddyfile/builtins_test.go   4
5 files changed, 15 insertions, 31 deletions
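
To make the intent of the change concrete before the diffs, here is a minimal standalone Go sketch (illustration only, not the Caddy source; the token type, file names, and import-site strings below are hypothetical stand-ins for caddyfile.Token): instead of rewriting a token's File each time it passes through an import directive, the file name is kept intact and each import site is appended to a separate imports slice, which errors then join into an import chain.

// sketch.go - minimal illustration of the idea in this commit (not Caddy code).
// The token type is a hypothetical stand-in for caddyfile.Token.
package main

import (
	"fmt"
	"strings"
)

type token struct {
	File    string   // original file the token was lexed from; never rewritten
	Line    int
	Text    string
	imports []string // one entry per import site the token passed through
}

func main() {
	tok := token{File: "snippets.caddy", Line: 2, Text: "unknown_directive"}

	// Each "import" that pulls this token in records where it happened,
	// in the "<file>:<line> (import <name>)" shape used by doImport below.
	tok.imports = append(tok.imports, "Caddyfile:6 (import t1)")

	// Errors keep the real source location and append the chain, mirroring
	// the new Dispenser.SyntaxErr/WrapErr format in this commit.
	msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', import chain: ['%s']",
		tok.File, tok.Line, tok.Text, strings.Join(tok.imports, "','"))
	fmt.Println(msg)
}
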
diff --git a/caddyconfig/caddyfile/dispenser.go b/caddyconfig/caddyfile/dispenser.go
index 0daed3c..580de4a 100644
--- a/caddyconfig/caddyfile/dispenser.go
+++ b/caddyconfig/caddyfile/dispenser.go
@@ -20,6 +20,7 @@ import (
"io"
"log"
"strconv"
+ "strings"
)
// Dispenser is a type that dispenses tokens, similarly to a lexer,
@@ -398,7 +399,7 @@ func (d *Dispenser) ArgErr() error {
// SyntaxErr creates a generic syntax error which explains what was
// found and what was expected.
func (d *Dispenser) SyntaxErr(expected string) error {
- msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s'", d.File(), d.Line(), d.Val(), expected)
+ msg := fmt.Sprintf("%s:%d - Syntax error: Unexpected token '%s', expecting '%s', import chain: ['%s']", d.File(), d.Line(), d.Val(), expected, strings.Join(d.Token().imports, "','"))
return errors.New(msg)
}
@@ -420,7 +421,7 @@ func (d *Dispenser) Errf(format string, args ...any) error {
// WrapErr takes an existing error and adds the Caddyfile file and line number.
func (d *Dispenser) WrapErr(err error) error {
- return fmt.Errorf("%s:%d - Error during parsing: %w", d.File(), d.Line(), err)
+ return fmt.Errorf("%s:%d - Error during parsing: %w, import chain: ['%s']", d.File(), d.Line(), err, strings.Join(d.Token().imports, "','"))
}
// Delete deletes the current token and returns the updated slice
diff --git a/caddyconfig/caddyfile/importargs.go b/caddyconfig/caddyfile/importargs.go
index 53725a1..c6dcd85 100644
--- a/caddyconfig/caddyfile/importargs.go
+++ b/caddyconfig/caddyfile/importargs.go
@@ -39,7 +39,7 @@ func parseVariadic(token Token, argCount int) (bool, int, int) {
if argRange == "" {
caddy.Log().Named("caddyfile").Warn(
"Placeholder "+token.Text+" cannot have an empty index",
- zap.String("file", token.File+":"+strconv.Itoa(token.Line)))
+ zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
@@ -61,7 +61,7 @@ func parseVariadic(token Token, argCount int) (bool, int, int) {
if err != nil {
caddy.Log().Named("caddyfile").Warn(
"Variadic placeholder "+token.Text+" has an invalid start index",
- zap.String("file", token.File+":"+strconv.Itoa(token.Line)))
+ zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
}
@@ -70,7 +70,7 @@ func parseVariadic(token Token, argCount int) (bool, int, int) {
if err != nil {
caddy.Log().Named("caddyfile").Warn(
"Variadic placeholder "+token.Text+" has an invalid end index",
- zap.String("file", token.File+":"+strconv.Itoa(token.Line)))
+ zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
}
@@ -79,7 +79,7 @@ func parseVariadic(token Token, argCount int) (bool, int, int) {
if startIndex < 0 || startIndex > endIndex || endIndex > argCount {
caddy.Log().Named("caddyfile").Warn(
"Variadic placeholder "+token.Text+" indices are out of bounds, only "+strconv.Itoa(argCount)+" argument(s) exist",
- zap.String("file", token.File+":"+strconv.Itoa(token.Line)))
+ zap.String("file", token.File+":"+strconv.Itoa(token.Line)), zap.Strings("import_chain", token.imports))
return false, 0, 0
}
return true, startIndex, endIndex
diff --git a/caddyconfig/caddyfile/lexer.go b/caddyconfig/caddyfile/lexer.go
index 1f531f4..d1fbfbb 100644
--- a/caddyconfig/caddyfile/lexer.go
+++ b/caddyconfig/caddyfile/lexer.go
@@ -39,7 +39,7 @@ type (
// Token represents a single parsable unit.
Token struct {
File string
- origFile string
+ imports []string
Line int
Text string
wasQuoted rune // enclosing quote character, if any
@@ -321,23 +321,6 @@ func (l *lexer) finalizeHeredoc(val []rune, marker string) ([]rune, error) {
return []rune(out), nil
}
-// originalFile gets original filename before import modification.
-func (t Token) originalFile() string {
- if t.origFile != "" {
- return t.origFile
- }
- return t.File
-}
-
-// updateFile updates the token's source filename for error display
-// and remembers the original filename. Used during "import" processing.
-func (t *Token) updateFile(file string) {
- if t.origFile == "" {
- t.origFile = t.File
- }
- t.File = file
-}
-
func (t Token) Quoted() bool {
return t.wasQuoted > 0
}
diff --git a/caddyconfig/caddyfile/parse.go b/caddyconfig/caddyfile/parse.go
index 5afdc21..81181a4 100644
--- a/caddyconfig/caddyfile/parse.go
+++ b/caddyconfig/caddyfile/parse.go
@@ -429,7 +429,7 @@ func (p *parser) doImport(nesting int) error {
nodes = matches
}
- nodeName := p.Token().originalFile()
+ nodeName := p.File()
if p.Token().snippetName != "" {
nodeName += fmt.Sprintf(":%s", p.Token().snippetName)
}
@@ -453,18 +453,18 @@ func (p *parser) doImport(nesting int) error {
// golang for range slice return a copy of value
// similarly, append also copy value
for i, token := range importedTokens {
- // set the token's file to refer to import directive line number and snippet name
+ // update the token's imports to refer to import directive filename, line number and snippet name if there is one
if token.snippetName != "" {
- token.updateFile(fmt.Sprintf("%s:%d (import %s)", token.File, p.Line(), token.snippetName))
+ token.imports = append(token.imports, fmt.Sprintf("%s:%d (import %s)", p.File(), p.Line(), token.snippetName))
} else {
- token.updateFile(fmt.Sprintf("%s:%d (import)", token.File, p.Line()))
+ token.imports = append(token.imports, fmt.Sprintf("%s:%d (import)", p.File(), p.Line()))
}
// naive way of determine snippets, as snippets definition can only follow name + block
// format, won't check for nesting correctness or any other error, that's what parser does.
if !maybeSnippet && nesting == 0 {
// first of the line
- if i == 0 || importedTokens[i-1].originalFile() != token.originalFile() || importedTokens[i-1].Line+importedTokens[i-1].NumLineBreaks() < token.Line {
+ if i == 0 || importedTokens[i-1].File != token.File || importedTokens[i-1].Line+importedTokens[i-1].NumLineBreaks() < token.Line {
index = 0
} else {
index++
diff --git a/caddyconfig/httpcaddyfile/builtins_test.go b/caddyconfig/httpcaddyfile/builtins_test.go
index 34b4124..63dd141 100644
--- a/caddyconfig/httpcaddyfile/builtins_test.go
+++ b/caddyconfig/httpcaddyfile/builtins_test.go
@@ -229,7 +229,7 @@ func TestImportErrorLine(t *testing.T) {
import t1 true
}`,
errorFunc: func(err error) bool {
- return err != nil && strings.Contains(err.Error(), "Caddyfile:6 (import t1):2")
+ return err != nil && strings.Contains(err.Error(), "Caddyfile:6 (import t1)")
},
},
{
@@ -240,7 +240,7 @@ func TestImportErrorLine(t *testing.T) {
import t1 true
}`,
errorFunc: func(err error) bool {
- return err != nil && strings.Contains(err.Error(), "Caddyfile:5 (import t1):2")
+ return err != nil && strings.Contains(err.Error(), "Caddyfile:5 (import t1)")
},
},
{