Unverified Commit 50db0567 authored by Oren Shomron's avatar Oren Shomron Committed by GitHub
Browse files

Merge pull request #800 from shomron/issue-783-upgrade-go-jsonnet

Upgrade go-jsonnet to 0.11.2
parents b6b33c5b 595f5367
This diff is collapsed.
......@@ -44,7 +44,7 @@ required = ["k8s.io/kubernetes/pkg/kubectl/cmd/util"]
[[constraint]]
name = "github.com/google/go-jsonnet"
revision = "a1964b49f18919f5aaed17d3c2f6b48a35634b5c"
revision = "v0.11.2"
[[constraint]]
name = "github.com/googleapis/gnostic"
......
......@@ -9,6 +9,4 @@ before_install:
- go get github.com/mattn/goveralls
- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
- go get github.com/sergi/go-diff/diffmatchpatch
script:
- $HOME/gopath/bin/goveralls -service=travis-ci
- ./tests.sh --skip-go-test
script: ./travisBuild.sh
/*
Copyright 2018 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ast provides AST nodes and ancillary structures and algorithms.
package ast
import (
"fmt"
)
// Fodder
// FodderKind is an enum of the kinds of fodder element.
type FodderKind int

const (
	// FodderLineEnd represents a line ending.
	//
	// It indicates that the next token, paragraph, or interstitial
	// should be on a new line.
	//
	// A single comment string is allowed, which flows before the new line.
	//
	// The LineEnd fodder specifies the indentation level and vertical spacing
	// before whatever comes next.
	FodderLineEnd FodderKind = iota

	// FodderInterstitial represents a comment in middle of a line.
	//
	// They must be /* C-style */ comments.
	//
	// If it follows a token (i.e., it is the first fodder element) then it
	// appears after the token on the same line. If it follows another
	// interstitial, it will also flow after it on the same line. If it follows
	// a new line or a paragraph, it is the first thing on the following line,
	// after the blank lines and indentation specified by the previous fodder.
	//
	// There is exactly one comment string.
	FodderInterstitial

	// FodderParagraph represents a comment consisting of at least one line.
	//
	// // and # style comments have exactly one line. C-style comments can have
	// more than one line.
	//
	// All lines of the comment are indented according to the indentation level
	// of the previous new line / paragraph fodder.
	//
	// The Paragraph fodder specifies the indentation level and vertical spacing
	// before whatever comes next.
	FodderParagraph
)

// FodderElement is a single piece of fodder.
type FodderElement struct {
	Kind    FodderKind
	Blanks  int      // number of blank lines represented by this element
	Indent  int      // indentation level before whatever comes next
	Comment []string // comment line(s); cardinality depends on Kind
}

// MakeFodderElement is a helper function that checks some preconditions.
//
// It panics if the combination of kind and the other arguments violates the
// per-kind invariants documented on the FodderKind constants above.
func MakeFodderElement(kind FodderKind, blanks int, indent int, comment []string) FodderElement {
	if kind == FodderLineEnd && len(comment) > 1 {
		panic(fmt.Sprintf("FodderLineEnd but comment == %v.", comment))
	}
	if kind == FodderInterstitial && blanks > 0 {
		panic(fmt.Sprintf("FodderInterstitial but blanks == %d", blanks))
	}
	if kind == FodderInterstitial && indent > 0 {
		// Report the offending indent value (previously this printed blanks,
		// which is always 0 on this path and made the message misleading).
		panic(fmt.Sprintf("FodderInterstitial but indent == %d", indent))
	}
	if kind == FodderInterstitial && len(comment) != 1 {
		panic(fmt.Sprintf("FodderInterstitial but comment == %v.", comment))
	}
	if kind == FodderParagraph && len(comment) == 0 {
		// Plain panic: the message has no formatting verbs (go vet flags
		// Sprintf calls without directives).
		panic("FodderParagraph but comment was empty")
	}
	return FodderElement{Kind: kind, Blanks: blanks, Indent: indent, Comment: comment}
}
// Fodder is stuff that is usually thrown away by lexers/preprocessors but is
// kept so that the source can be round tripped with near full fidelity.
//
// It is an ordered sequence of FodderElement values attached to a token,
// describing the comments and blank lines that precede it.
type Fodder []FodderElement
// FodderHasCleanEndline is true if the fodder doesn't end with an interstitial.
func FodderHasCleanEndline(fodder Fodder) bool {
	if len(fodder) == 0 {
		// Empty fodder has no endline at all.
		return false
	}
	last := fodder[len(fodder)-1]
	return last.Kind != FodderInterstitial
}
// FodderAppend appends to the fodder but preserves constraints.
//
// Namely, a FodderLineEnd may not follow a clean endline (it is merged or
// promoted to a paragraph instead), and a FodderParagraph may not directly
// follow an interstitial (a separating line end is inserted first).
//
// See FodderConcat below.
func FodderAppend(a *Fodder, elem FodderElement) {
	if FodderHasCleanEndline(*a) && elem.Kind == FodderLineEnd {
		if len(elem.Comment) > 0 {
			// The line end had a comment, so create a single line paragraph for it.
			*a = append(*a, MakeFodderElement(FodderParagraph, elem.Blanks, elem.Indent, elem.Comment))
		} else {
			// Note: mutates the last element of *a in place.
			back := &(*a)[len(*a)-1]
			// Merge it into the previous line end.
			back.Indent = elem.Indent
			back.Blanks += elem.Blanks
		}
	} else {
		if !FodderHasCleanEndline(*a) && elem.Kind == FodderParagraph {
			// Insert a blank-free line end so the paragraph does not follow an
			// interstitial (or start the fodder) directly.
			*a = append(*a, MakeFodderElement(FodderLineEnd, 0, elem.Indent, []string{}))
		}
		*a = append(*a, elem)
	}
}
// FodderConcat concats the two fodders but also preserves constraints.
//
// Namely, a FodderLineEnd is not allowed to follow a FodderParagraph or a FodderLineEnd.
//
// Neither input is modified; the result may share storage with b when a is
// empty (or with a when b is empty).
func FodderConcat(a Fodder, b Fodder) Fodder {
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	// Work on a copy of a: FodderAppend can mutate the final element in place
	// (merging line ends) or append into spare capacity, either of which
	// would otherwise clobber the caller's slice via the shared backing array.
	r := make(Fodder, len(a), len(a)+len(b))
	copy(r, a)
	// Carefully add the first element of b, preserving the constraints.
	FodderAppend(&r, b[0])
	// The remainder of b already satisfies the constraints internally.
	r = append(r, b[1:]...)
	return r
}
// FodderMoveFront moves b to the front of a.
//
// After the call, *a holds b's elements followed by a's, and *b is empty.
func FodderMoveFront(a *Fodder, b *Fodder) {
	combined := FodderConcat(*b, *a)
	*a = combined
	*b = Fodder{}
}
// FodderEnsureCleanNewline adds a LineEnd to the fodder if necessary.
func FodderEnsureCleanNewline(fodder *Fodder) {
	if FodderHasCleanEndline(*fodder) {
		// Already ends in a line end or paragraph; nothing to do.
		return
	}
	FodderAppend(fodder, MakeFodderElement(FodderLineEnd, 0, 0, []string{}))
}
// FodderElementCountNewlines returns the number of new line chars represented by the fodder element
func FodderElementCountNewlines(elem FodderElement) int {
	switch elem.Kind {
	case FodderInterstitial:
		// Interstitial comments stay on the current line.
		return 0
	case FodderLineEnd:
		return 1
	case FodderParagraph:
		// One newline per comment line, plus the trailing blank lines.
		return len(elem.Comment) + elem.Blanks
	default:
		panic(fmt.Sprintf("Unknown FodderElement kind %d", elem.Kind))
	}
}
// FodderCountNewlines returns the number of new line chars represented by the fodder.
func FodderCountNewlines(fodder Fodder) int {
	total := 0
	for i := range fodder {
		total += FodderElementCountNewlines(fodder[i])
	}
	return total
}
......@@ -795,10 +795,8 @@ func builtinNative(i *interpreter, trace TraceElement, name value) (value, error
index := str.getString()
if f, exists := i.nativeFuncs[index]; exists {
return &valueFunction{ec: f}, nil
}
return nil, i.Error(fmt.Sprintf("Unrecognized native function name: %v", index), trace)
return &valueNull{}, nil
}
type unaryBuiltinFunc func(*interpreter, TraceElement, value) (value, error)
......
......@@ -66,7 +66,7 @@ func directChildren(node ast.Node) []ast.Node {
case *ast.Slice:
return []ast.Node{node.Target, node.BeginIndex, node.EndIndex, node.Step}
case *ast.Local:
return []ast.Node{node.Body}
return nil
case *ast.LiteralBoolean:
return nil
case *ast.LiteralNull:
......@@ -249,7 +249,12 @@ func specialChildren(node ast.Node) []ast.Node {
case *ast.Slice:
return nil
case *ast.Local:
return nil
children := make([]ast.Node, 1, len(node.Binds)+1)
children[0] = node.Body
for _, bind := range node.Binds {
children = append(children, bind.Body)
}
return children
case *ast.LiteralBoolean:
return nil
case *ast.LiteralNull:
......
......@@ -26,27 +26,6 @@ import (
"github.com/google/go-jsonnet/ast"
)
// ---------------------------------------------------------------------------
// Fodder
//
// Fodder is stuff that is usually thrown away by lexers/preprocessors but is
// kept so that the source can be round tripped with full fidelity.
type fodderKind int
const (
fodderWhitespace fodderKind = iota
fodderCommentC
fodderCommentCpp
fodderCommentHash
)
type fodderElement struct {
kind fodderKind
data string
}
type fodder []fodderElement
// ---------------------------------------------------------------------------
// Token
......@@ -154,9 +133,9 @@ func (tk tokenKind) String() string {
}
type token struct {
kind tokenKind // The type of the token
fodder fodder // Any fodder the occurs before this token
data string // Content of the token if it is not a keyword
kind tokenKind // The type of the token
fodder ast.Fodder // Any fodder that occurs before this token
data string // Content of the token if it is not a keyword
// Extra info for when kind == tokenStringBlock
stringBlockIndent string // The sequence of whitespace that indented the block.
......@@ -209,6 +188,47 @@ func isSymbol(r rune) bool {
return false
}
// isHorizontalWhitespace reports whether r is whitespace other than a newline
// (space, tab, or carriage return).
func isHorizontalWhitespace(r rune) bool {
	switch r {
	case ' ', '\t', '\r':
		return true
	}
	return false
}

// isWhitespace reports whether r is any whitespace rune, including newline.
func isWhitespace(r rune) bool {
	if r == '\n' {
		return true
	}
	return isHorizontalWhitespace(r)
}

// stripWhitespace strips horizontal whitespace from both ends of a string,
// removing at most margin runes from the left hand side.
// E.g., stripWhitespace("  foo  ", 1) == " foo".
func stripWhitespace(s string, margin int) string {
	if len(s) == 0 {
		// Avoid underflow below.
		return s
	}
	runes := []rune(s)
	start := 0
	for start < margin && start < len(runes) && isHorizontalWhitespace(runes[start]) {
		start++
	}
	end := len(runes)
	for end > start && isHorizontalWhitespace(runes[end-1]) {
		end--
	}
	return string(runes[start:end])
}

// lineSplit splits s on '\n' and strips left (up to margin) and right
// whitespace from each resulting line.
func lineSplit(s string, margin int) []string {
	var lines []string
	var current bytes.Buffer
	flush := func() {
		lines = append(lines, stripWhitespace(current.String(), margin))
		current.Reset()
	}
	for _, r := range s {
		if r == '\n' {
			flush()
		} else {
			current.WriteRune(r)
		}
	}
	// The text after the final '\n' (possibly empty) is also a line.
	flush()
	return lines
}
// Check that b has at least the same whitespace prefix as a and returns the
// amount of this whitespace, otherwise returns 0. If a has no whitespace
// prefix than return 0.
......@@ -254,9 +274,12 @@ type lexer struct {
tokens Tokens // The tokens that we've generated so far
// Information about the token we are working on right now
fodder fodder
fodder ast.Fodder
tokenStart int
tokenStartLoc ast.Location
// Was the last rune the first rune on a line (ignoring initial whitespace).
freshLine bool
}
const lexEOF = -1
......@@ -269,6 +292,7 @@ func makeLexer(fn string, input string) *lexer {
pos: position{byteNo: 0, lineNo: 1, lineStart: 0},
prev: position{byteNo: lexEOF, lineNo: 0, lineStart: 0},
tokenStartLoc: ast.Location{Line: 1, Column: 1},
freshLine: true,
}
}
......@@ -284,6 +308,11 @@ func (l *lexer) next() rune {
if r == '\n' {
l.pos.lineStart = l.pos.byteNo
l.pos.lineNo++
l.freshLine = true
} else if l.freshLine {
if !isWhitespace(r) {
l.freshLine = false
}
}
return r
}
......@@ -302,6 +331,7 @@ func (l *lexer) peek() rune {
}
// backup steps back one rune. Can only be called once per call of next.
// It also does not recover the previous value of freshLine.
func (l *lexer) backup() {
if l.prev.byteNo == lexEOF {
panic("backup called with no valid previous rune")
......@@ -342,7 +372,7 @@ func (l *lexer) emitFullToken(kind tokenKind, data, stringBlockIndent, stringBlo
stringBlockTermIndent: stringBlockTermIndent,
loc: ast.MakeLocationRange(l.fileName, l.source, l.tokenStartLoc, l.location()),
})
l.fodder = fodder{}
l.fodder = ast.Fodder{}
}
func (l *lexer) emitToken(kind tokenKind) {
......@@ -350,28 +380,75 @@ func (l *lexer) emitToken(kind tokenKind) {
l.resetTokenStart()
}
func (l *lexer) addWhitespaceFodder() {
fodderData := l.input[l.tokenStart:l.pos.byteNo]
if len(l.fodder) == 0 || l.fodder[len(l.fodder)-1].kind != fodderWhitespace {
l.fodder = append(l.fodder, fodderElement{kind: fodderWhitespace, data: fodderData})
} else {
l.fodder[len(l.fodder)-1].data += fodderData
}
l.resetTokenStart()
// addFodder builds a fodder element (validating its invariants via
// ast.MakeFodderElement) and appends it to the fodder accumulated for the
// current token.
func (l *lexer) addFodder(kind ast.FodderKind, blanks int, indent int, comment []string) {
	l.fodder = append(l.fodder, ast.MakeFodderElement(kind, blanks, indent, comment))
}
func (l *lexer) addCommentFodder(kind fodderKind) {
fodderData := l.input[l.tokenStart:l.pos.byteNo]
l.fodder = append(l.fodder, fodderElement{kind: kind, data: fodderData})
l.resetTokenStart()
// makeStaticErrorPoint builds a StaticError whose location range is the
// single point loc within the lexer's current file.
func (l *lexer) makeStaticErrorPoint(msg string, loc ast.Location) StaticError {
	pointRange := ast.MakeLocationRange(l.fileName, l.source, loc, loc)
	return StaticError{Msg: msg, Loc: pointRange}
}
func (l *lexer) addFodder(kind fodderKind, data string) {
l.fodder = append(l.fodder, fodderElement{kind: kind, data: data})
}
// lexWhitespace consumes all whitespace and returns the number of \n and number of
// spaces after last \n. It also converts \t to spaces.
// The parameter 'r' is the rune that begins the whitespace.
func (l *lexer) lexWhitespace() (int, int) {
r := l.next()
indent := 0
newLines := 0
for ; isWhitespace(r); r = l.next() {
switch r {
case '\r':
// Ignore.
break
func (l *lexer) makeStaticErrorPoint(msg string, loc ast.Location) StaticError {
return StaticError{Msg: msg, Loc: ast.MakeLocationRange(l.fileName, l.source, loc, loc)}
case '\n':
indent = 0
newLines++
break
case ' ':
indent++
break
// This only works for \t at the beginning of lines, but we strip it everywhere else
// anyway. The only case where this will cause a problem is spaces followed by \t
// at the beginning of a line. However that is rare, ill-advised, and if re-indentation
// is enabled it will be fixed later.
case '\t':
indent += 8
break
}
}
l.backup()
return newLines, indent
}
// lexUntilNewline consumes the remainder of the current line (excluding the
// trailing '\n') and any whitespace that follows it. It returns the consumed
// text with trailing horizontal whitespace removed, the number of blank lines
// after it, and the indentation of the next non-blank line.
func (l *lexer) lexUntilNewline() (string, int, int) {
	// Accumulate the line, remembering where the last non-space rune ended.
	var line bytes.Buffer
	trimmedLen := 0
	for {
		r := l.next()
		if r == lexEOF || r == '\n' {
			break
		}
		line.WriteRune(r)
		if !isHorizontalWhitespace(r) {
			trimmedLen = line.Len()
		}
	}
	l.backup()

	// Drop trailing horizontal whitespace.
	line.Truncate(trimmedLen)

	// Consume the '\n' and following indent.
	newLines, indent := l.lexWhitespace()
	blanks := 0
	if newLines > 0 {
		blanks = newLines - 1
	}
	return line.String(), blanks, indent
}
// lexNumber will consume a number and emit a token. It is assumed
......@@ -548,34 +625,70 @@ func (l *lexer) lexSymbol() error {
r := l.next()
// Single line C++ style comment
if r == '/' && l.peek() == '/' {
l.next()
l.resetTokenStart() // Throw out the leading //
for r = l.next(); r != lexEOF && r != '\n'; r = l.next() {
if r == '#' || (r == '/' && l.peek() == '/') {
comment, blanks, indent := l.lexUntilNewline()
var k ast.FodderKind
if l.freshLine {
k = ast.FodderParagraph
} else {
k = ast.FodderLineEnd
}
// Leave the '\n' in the lexer to be fodder for the next round
l.backup()
l.addCommentFodder(fodderCommentCpp)
l.addFodder(k, blanks, indent, []string{string(r) + comment})
return nil
}
// C style comment (could be interstitial or paragraph comment)
if r == '/' && l.peek() == '*' {
margin := l.pos.byteNo - l.pos.lineStart
commentStartLoc := l.tokenStartLoc
l.next() // consume the '*'
l.resetTokenStart() // Throw out the leading /*
for r = l.next(); ; r = l.next() {
r := l.next() // consume the initial '*'
for r = l.next(); r != '*' || l.peek() != '/'; r = l.next() {
if r == lexEOF {
return l.makeStaticErrorPoint("Multi-line comment has no terminating */",
return l.makeStaticErrorPoint(
"Multi-line comment has no terminating */",
commentStartLoc)
}
if r == '*' && l.peek() == '/' {
commentData := l.input[l.tokenStart : l.pos.byteNo-1] // Don't include trailing */
l.addFodder(fodderCommentC, commentData)
l.next() // Skip past '/'
l.resetTokenStart() // Start next token at this point
return nil
}
l.next() // Consume trailing '/'
// Includes the "/*" and "*/".
comment := l.input[l.tokenStart:l.pos.byteNo]
newLinesAfter, indentAfter := l.lexWhitespace()
if !strings.ContainsRune(comment, '\n') {
l.addFodder(ast.FodderInterstitial, 0, 0, []string{comment})
if newLinesAfter > 0 {
l.addFodder(ast.FodderLineEnd, newLinesAfter-1, indentAfter, []string{})
}
} else {
lines := lineSplit(comment, margin)
if lines[0][0] != '/' {
panic(fmt.Sprintf("Invalid parsing of C style comment %v", lines))
}
// Little hack to support FodderParagraphs with * down the LHS:
// Add a space to lines that start with a '*'
allStar := true
for _, l := range lines {
if len(l) == 0 || l[0] != '*' {
allStar = false
}
}
if allStar {
for _, l := range lines {
if l[0] == '*' {
l = " " + l
}
}
}
if newLinesAfter == 0 {
// Ensure a line end after the paragraph.
newLinesAfter = 1
indentAfter = 0
}
l.addFodder(ast.FodderParagraph, newLinesAfter-1, indentAfter, lines)
}
return nil
}
if r == '|' && strings.HasPrefix(l.input[l.pos.byteNo:], "||") {
......@@ -688,12 +801,22 @@ func Lex(fn string, input string) (Tokens, error) {
l := makeLexer(fn, input)
var err error
for r := l.next(); r != lexEOF; r = l.next() {
for true {
newLines, indent := l.lexWhitespace()
// If it's the end of the file, discard final whitespace.
if l.peek() == lexEOF {
l.next()
l.resetTokenStart()
break
}
if newLines > 0 {
// Otherwise store whitespace in fodder.
blanks := newLines - 1
l.addFodder(ast.FodderLineEnd, blanks, indent, []string{})
}
l.resetTokenStart() // Don't include whitespace in actual token.
r := l.next()
switch r {
case ' ', '\t', '\r', '\n':
l.addWhitespaceFodder()
continue
case '{':
l.emitToken(tokenBraceL)
case '}':
......@@ -791,19 +914,11 @@ func Lex(fn string, input string) (Tokens, error) {
}
}
case '#':
l.resetTokenStart() // Throw out the leading #
for r = l.next(); r != lexEOF && r != '\n'; r = l.next() {
}
// Leave the '\n' in the lexer to be fodder for the next round