From 2a796232917db40857847322f1f59fa61e729b0a Mon Sep 17 00:00:00 2001
From: Angus Lees <gus@inodes.org>
Date: Fri, 9 Jun 2017 18:57:50 +1000
Subject: [PATCH] Vendor updates for previous commit

---
 vendor/github.com/mattn/go-isatty/LICENSE | 9 +
 vendor/github.com/mattn/go-isatty/README.md | 50 +
 vendor/github.com/mattn/go-isatty/doc.go | 2 +
 .../mattn/go-isatty/isatty_appengine.go | 15 +
 .../github.com/mattn/go-isatty/isatty_bsd.go | 18 +
 .../mattn/go-isatty/isatty_linux.go | 18 +
 .../mattn/go-isatty/isatty_others.go | 10 +
 .../mattn/go-isatty/isatty_solaris.go | 16 +
 .../mattn/go-isatty/isatty_windows.go | 94 ++
 vendor/github.com/sergi/go-diff/LICENSE | 20 +
 .../sergi/go-diff/diffmatchpatch/diff.go | 1344 +++++++++++++++++
 .../go-diff/diffmatchpatch/diffmatchpatch.go | 46 +
 .../sergi/go-diff/diffmatchpatch/match.go | 160 ++
 .../sergi/go-diff/diffmatchpatch/mathutil.go | 23 +
 .../sergi/go-diff/diffmatchpatch/patch.go | 556 +++++++
 .../go-diff/diffmatchpatch/stringutil.go | 88 ++
 vendor/github.com/yudai/gojsondiff/LICENSE | 145 ++
 vendor/github.com/yudai/gojsondiff/Makefile | 2 +
 vendor/github.com/yudai/gojsondiff/README.md | 157 ++
 vendor/github.com/yudai/gojsondiff/deltas.go | 461 ++++++
 .../yudai/gojsondiff/formatter/ascii.go | 370 +++++
 .../yudai/gojsondiff/formatter/delta.go | 124 ++
 .../github.com/yudai/gojsondiff/gojsondiff.go | 426 ++++++
 .../yudai/gojsondiff/unmarshaler.go | 131 ++
 .../github.com/yudai/gojsondiff/wercker.yml | 8 +
 vendor/github.com/yudai/golcs/LICENSE | 21 +
 vendor/github.com/yudai/golcs/README.md | 60 +
 vendor/github.com/yudai/golcs/golcs.go | 195 +++
 vendor/vendor.json | 30 +
 29 files changed, 4599 insertions(+)
 create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE
 create mode 100644 vendor/github.com/mattn/go-isatty/README.md
 create mode 100644 vendor/github.com/mattn/go-isatty/doc.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_linux.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go
 create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go
 create mode 100644 vendor/github.com/sergi/go-diff/LICENSE
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
 create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go
 create mode 100644 vendor/github.com/yudai/gojsondiff/LICENSE
 create mode 100644 vendor/github.com/yudai/gojsondiff/Makefile
 create mode 100644 vendor/github.com/yudai/gojsondiff/README.md
 create mode 100644 vendor/github.com/yudai/gojsondiff/deltas.go
 create mode 100644 vendor/github.com/yudai/gojsondiff/formatter/ascii.go
 create mode 100644 vendor/github.com/yudai/gojsondiff/formatter/delta.go
 create mode 100644 vendor/github.com/yudai/gojsondiff/gojsondiff.go
 create mode 100644 vendor/github.com/yudai/gojsondiff/unmarshaler.go
 create mode 100644 vendor/github.com/yudai/gojsondiff/wercker.yml
 create mode 100644 vendor/github.com/yudai/golcs/LICENSE
 create mode 100644 vendor/github.com/yudai/golcs/README.md
 create mode 100644
vendor/github.com/yudai/golcs/golcs.go diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 00000000..65dc692b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com> + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/README.md b/vendor/github.com/mattn/go-isatty/README.md new file mode 100644 index 00000000..1e69004b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/README.md @@ -0,0 +1,50 @@ +# go-isatty + +[](http://godoc.org/github.com/mattn/go-isatty) +[](https://travis-ci.org/mattn/go-isatty) +[](https://coveralls.io/github/mattn/go-isatty?branch=master) +[](https://goreportcard.com/report/mattn/go-isatty) + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else if isatty.IsCygwinTerminal(os.Stdout.Fd()) { + fmt.Println("Is Cygwin/MSYS2 Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +## License + +MIT + +## Author + +Yasuhiro Matsumoto (a.k.a mattn) + +## Thanks + +* k-takata: base idea for IsCygwinTerminal + + https://github.com/k-takata/go-iscygpty diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 00000000..17d4f90e --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go new file mode 100644 index 00000000..9584a988 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go @@ -0,0 +1,15 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on on appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. 
+func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 00000000..42f2514d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 00000000..9d24bac1 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_others.go b/vendor/github.com/mattn/go-isatty/isatty_others.go new file mode 100644 index 00000000..ff4de3d9 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_others.go @@ -0,0 +1,10 @@ +// +build !windows +// +build !appengine + +package isatty + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. This is also always false on this environment. +func IsCygwinTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 00000000..1f0c6bf5 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 00000000..af51cbca --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,94 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +const ( + fileNameInfo uintptr = 2 + fileTypePipe = 3 +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procGetFileInformationByHandleEx = kernel32.NewProc("GetFileInformationByHandleEx") + procGetFileType = kernel32.NewProc("GetFileType") +) + +func init() { + // Check if GetFileInformationByHandleEx is available. + if procGetFileInformationByHandleEx.Find() != nil { + procGetFileInformationByHandleEx = nil + } +} + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} + +// Check pipe name is used for cygwin/msys2 pty. +// Cygwin/MSYS2 PTY has a name like: +// \{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master +func isCygwinPipeName(name string) bool { + token := strings.Split(name, "-") + if len(token) < 5 { + return false + } + + if token[0] != `\msys` && token[0] != `\cygwin` { + return false + } + + if token[1] == "" { + return false + } + + if !strings.HasPrefix(token[2], "pty") { + return false + } + + if token[3] != `from` && token[3] != `to` { + return false + } + + if token[4] != "master" { + return false + } + + return true +} + +// IsCygwinTerminal() return true if the file descriptor is a cygwin or msys2 +// terminal. +func IsCygwinTerminal(fd uintptr) bool { + if procGetFileInformationByHandleEx == nil { + return false + } + + // Cygwin/msys's pty is a pipe. + ft, _, e := syscall.Syscall(procGetFileType.Addr(), 1, fd, 0, 0) + if ft != fileTypePipe || e != 0 { + return false + } + + var buf [2 + syscall.MAX_PATH]uint16 + r, _, e := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), + 4, fd, fileNameInfo, uintptr(unsafe.Pointer(&buf)), + uintptr(len(buf)*2), 0, 0) + if r == 0 || e != 0 { + return false + } + + l := *(*uint32)(unsafe.Pointer(&buf)) + return isCygwinPipeName(string(utf16.Decode(buf[2 : 2+l/2]))) +} diff --git a/vendor/github.com/sergi/go-diff/LICENSE b/vendor/github.com/sergi/go-diff/LICENSE new file mode 100644 index 00000000..937942c2 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2012-2016 The go-diff Authors. All rights reserved. + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go new file mode 100644 index 00000000..82ad7bc8 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go @@ -0,0 +1,1344 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. 
+// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "fmt" + "html" + "math" + "net/url" + "regexp" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Operation defines the operation of a diff item. +type Operation int8 + +const ( + // DiffDelete item represents a delete diff. + DiffDelete Operation = -1 + // DiffInsert item represents an insert diff. + DiffInsert Operation = 1 + // DiffEqual item represents an equal diff. + DiffEqual Operation = 0 +) + +// Diff represents one diff operation +type Diff struct { + Type Operation + Text string +} + +func splice(slice []Diff, index int, amount int, elements ...Diff) []Diff { + return append(slice[:index], append(elements, slice[index+amount:]...)...) +} + +// DiffMain finds the differences between two texts. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMain(text1, text2 string, checklines bool) []Diff { + return dmp.DiffMainRunes([]rune(text1), []rune(text2), checklines) +} + +// DiffMainRunes finds the differences between two rune sequences. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +func (dmp *DiffMatchPatch) DiffMainRunes(text1, text2 []rune, checklines bool) []Diff { + var deadline time.Time + if dmp.DiffTimeout > 0 { + deadline = time.Now().Add(dmp.DiffTimeout) + } + return dmp.diffMainRunes(text1, text2, checklines, deadline) +} + +func (dmp *DiffMatchPatch) diffMainRunes(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + if runesEqual(text1, text2) { + var diffs []Diff + if len(text1) > 0 { + diffs = append(diffs, Diff{DiffEqual, string(text1)}) + } + return diffs + } + // Trim off common prefix (speedup). + commonlength := commonPrefixLength(text1, text2) + commonprefix := text1[:commonlength] + text1 = text1[commonlength:] + text2 = text2[commonlength:] + + // Trim off common suffix (speedup). + commonlength = commonSuffixLength(text1, text2) + commonsuffix := text1[len(text1)-commonlength:] + text1 = text1[:len(text1)-commonlength] + text2 = text2[:len(text2)-commonlength] + + // Compute the diff on the middle block. + diffs := dmp.diffCompute(text1, text2, checklines, deadline) + + // Restore the prefix and suffix. + if len(commonprefix) != 0 { + diffs = append([]Diff{Diff{DiffEqual, string(commonprefix)}}, diffs...) + } + if len(commonsuffix) != 0 { + diffs = append(diffs, Diff{DiffEqual, string(commonsuffix)}) + } + + return dmp.DiffCleanupMerge(diffs) +} + +// diffCompute finds the differences between two rune slices. Assumes that the texts do not have any common prefix or suffix. +func (dmp *DiffMatchPatch) diffCompute(text1, text2 []rune, checklines bool, deadline time.Time) []Diff { + diffs := []Diff{} + if len(text1) == 0 { + // Just add some text (speedup). + return append(diffs, Diff{DiffInsert, string(text2)}) + } else if len(text2) == 0 { + // Just delete some text (speedup). + return append(diffs, Diff{DiffDelete, string(text1)}) + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if i := runesIndex(longtext, shorttext); i != -1 { + op := DiffInsert + // Swap insertions for deletions if diff is reversed. + if len(text1) > len(text2) { + op = DiffDelete + } + // Shorter text is inside the longer text (speedup). 
+ return []Diff{ + Diff{op, string(longtext[:i])}, + Diff{DiffEqual, string(shorttext)}, + Diff{op, string(longtext[i+len(shorttext):])}, + } + } else if len(shorttext) == 1 { + // Single character string. + // After the previous speedup, the character can't be an equality. + return []Diff{ + Diff{DiffDelete, string(text1)}, + Diff{DiffInsert, string(text2)}, + } + // Check to see if the problem can be split in two. + } else if hm := dmp.diffHalfMatch(text1, text2); hm != nil { + // A half-match was found, sort out the return data. + text1A := hm[0] + text1B := hm[1] + text2A := hm[2] + text2B := hm[3] + midCommon := hm[4] + // Send both pairs off for separate processing. + diffsA := dmp.diffMainRunes(text1A, text2A, checklines, deadline) + diffsB := dmp.diffMainRunes(text1B, text2B, checklines, deadline) + // Merge the results. + return append(diffsA, append([]Diff{Diff{DiffEqual, string(midCommon)}}, diffsB...)...) + } else if checklines && len(text1) > 100 && len(text2) > 100 { + return dmp.diffLineMode(text1, text2, deadline) + } + return dmp.diffBisect(text1, text2, deadline) +} + +// diffLineMode does a quick line-level diff on both []runes, then rediff the parts for greater accuracy. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) diffLineMode(text1, text2 []rune, deadline time.Time) []Diff { + // Scan the text on a line-by-line basis first. + text1, text2, linearray := dmp.diffLinesToRunes(text1, text2) + + diffs := dmp.diffMainRunes(text1, text2, false, deadline) + + // Convert the diff back to original text. + diffs = dmp.DiffCharsToLines(diffs, linearray) + // Eliminate freak matches (e.g. blank lines) + diffs = dmp.DiffCleanupSemantic(diffs) + + // Rediff any replacement blocks, this time character-by-character. + // Add a dummy entry at the end. + diffs = append(diffs, Diff{DiffEqual, ""}) + + pointer := 0 + countDelete := 0 + countInsert := 0 + + // NOTE: Rune slices are slower than using strings in this case. + textDelete := "" + textInsert := "" + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert += diffs[pointer].Text + case DiffDelete: + countDelete++ + textDelete += diffs[pointer].Text + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete >= 1 && countInsert >= 1 { + // Delete the offending records and add the merged ones. + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert) + + pointer = pointer - countDelete - countInsert + a := dmp.diffMainRunes([]rune(textDelete), []rune(textInsert), false, deadline) + for j := len(a) - 1; j >= 0; j-- { + diffs = splice(diffs, pointer, 0, a[j]) + } + pointer = pointer + len(a) + } + + countInsert = 0 + countDelete = 0 + textDelete = "" + textInsert = "" + } + pointer++ + } + + return diffs[:len(diffs)-1] // Remove the dummy entry at the end. +} + +// DiffBisect finds the 'middle snake' of a diff, split the problem in two and return the recursively constructed diff. +// If an invalid UTF-8 sequence is encountered, it will be replaced by the Unicode replacement character. +// See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) DiffBisect(text1, text2 string, deadline time.Time) []Diff { + // Unused in this code, but retained for interface compatibility. 
+ return dmp.diffBisect([]rune(text1), []rune(text2), deadline) +} + +// diffBisect finds the 'middle snake' of a diff, splits the problem in two and returns the recursively constructed diff. +// See Myers's 1986 paper: An O(ND) Difference Algorithm and Its Variations. +func (dmp *DiffMatchPatch) diffBisect(runes1, runes2 []rune, deadline time.Time) []Diff { + // Cache the text lengths to prevent multiple calls. + runes1Len, runes2Len := len(runes1), len(runes2) + + maxD := (runes1Len + runes2Len + 1) / 2 + vOffset := maxD + vLength := 2 * maxD + + v1 := make([]int, vLength) + v2 := make([]int, vLength) + for i := range v1 { + v1[i] = -1 + v2[i] = -1 + } + v1[vOffset+1] = 0 + v2[vOffset+1] = 0 + + delta := runes1Len - runes2Len + // If the total number of characters is odd, then the front path will collide with the reverse path. + front := (delta%2 != 0) + // Offsets for start and end of k loop. Prevents mapping of space beyond the grid. + k1start := 0 + k1end := 0 + k2start := 0 + k2end := 0 + for d := 0; d < maxD; d++ { + // Bail out if deadline is reached. + if !deadline.IsZero() && time.Now().After(deadline) { + break + } + + // Walk the front path one step. + for k1 := -d + k1start; k1 <= d-k1end; k1 += 2 { + k1Offset := vOffset + k1 + var x1 int + + if k1 == -d || (k1 != d && v1[k1Offset-1] < v1[k1Offset+1]) { + x1 = v1[k1Offset+1] + } else { + x1 = v1[k1Offset-1] + 1 + } + + y1 := x1 - k1 + for x1 < runes1Len && y1 < runes2Len { + if runes1[x1] != runes2[y1] { + break + } + x1++ + y1++ + } + v1[k1Offset] = x1 + if x1 > runes1Len { + // Ran off the right of the graph. + k1end += 2 + } else if y1 > runes2Len { + // Ran off the bottom of the graph. + k1start += 2 + } else if front { + k2Offset := vOffset + delta - k1 + if k2Offset >= 0 && k2Offset < vLength && v2[k2Offset] != -1 { + // Mirror x2 onto top-left coordinate system. + x2 := runes1Len - v2[k2Offset] + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + // Walk the reverse path one step. + for k2 := -d + k2start; k2 <= d-k2end; k2 += 2 { + k2Offset := vOffset + k2 + var x2 int + if k2 == -d || (k2 != d && v2[k2Offset-1] < v2[k2Offset+1]) { + x2 = v2[k2Offset+1] + } else { + x2 = v2[k2Offset-1] + 1 + } + var y2 = x2 - k2 + for x2 < runes1Len && y2 < runes2Len { + if runes1[runes1Len-x2-1] != runes2[runes2Len-y2-1] { + break + } + x2++ + y2++ + } + v2[k2Offset] = x2 + if x2 > runes1Len { + // Ran off the left of the graph. + k2end += 2 + } else if y2 > runes2Len { + // Ran off the top of the graph. + k2start += 2 + } else if !front { + k1Offset := vOffset + delta - k2 + if k1Offset >= 0 && k1Offset < vLength && v1[k1Offset] != -1 { + x1 := v1[k1Offset] + y1 := vOffset + x1 - k1Offset + // Mirror x2 onto top-left coordinate system. + x2 = runes1Len - x2 + if x1 >= x2 { + // Overlap detected. + return dmp.diffBisectSplit(runes1, runes2, x1, y1, deadline) + } + } + } + } + } + // Diff took too long and hit the deadline or number of diffs equals number of characters, no commonality at all. + return []Diff{ + Diff{DiffDelete, string(runes1)}, + Diff{DiffInsert, string(runes2)}, + } +} + +func (dmp *DiffMatchPatch) diffBisectSplit(runes1, runes2 []rune, x, y int, + deadline time.Time) []Diff { + runes1a := runes1[:x] + runes2a := runes2[:y] + runes1b := runes1[x:] + runes2b := runes2[y:] + + // Compute both diffs serially. 
+ diffs := dmp.diffMainRunes(runes1a, runes2a, false, deadline) + diffsb := dmp.diffMainRunes(runes1b, runes2b, false, deadline) + + return append(diffs, diffsb...) +} + +// DiffLinesToChars splits two texts into a list of strings, and educes the texts to a string of hashes where each Unicode character represents one line. +// It's slightly faster to call DiffLinesToRunes first, followed by DiffMainRunes. +func (dmp *DiffMatchPatch) DiffLinesToChars(text1, text2 string) (string, string, []string) { + chars1, chars2, lineArray := dmp.DiffLinesToRunes(text1, text2) + return string(chars1), string(chars2), lineArray +} + +// DiffLinesToRunes splits two texts into a list of runes. Each rune represents one line. +func (dmp *DiffMatchPatch) DiffLinesToRunes(text1, text2 string) ([]rune, []rune, []string) { + // '\x00' is a valid character, but various debuggers don't like it. So we'll insert a junk entry to avoid generating a null character. + lineArray := []string{""} // e.g. lineArray[4] == 'Hello\n' + lineHash := map[string]int{} // e.g. lineHash['Hello\n'] == 4 + + chars1 := dmp.diffLinesToRunesMunge(text1, &lineArray, lineHash) + chars2 := dmp.diffLinesToRunesMunge(text2, &lineArray, lineHash) + + return chars1, chars2, lineArray +} + +func (dmp *DiffMatchPatch) diffLinesToRunes(text1, text2 []rune) ([]rune, []rune, []string) { + return dmp.DiffLinesToRunes(string(text1), string(text2)) +} + +// diffLinesToRunesMunge splits a text into an array of strings, and reduces the texts to a []rune where each Unicode character represents one line. +// We use strings instead of []runes as input mainly because you can't use []rune as a map key. +func (dmp *DiffMatchPatch) diffLinesToRunesMunge(text string, lineArray *[]string, lineHash map[string]int) []rune { + // Walk the text, pulling out a substring for each line. text.split('\n') would would temporarily double our memory footprint. Modifying text would create many large strings to garbage collect. + lineStart := 0 + lineEnd := -1 + runes := []rune{} + + for lineEnd < len(text)-1 { + lineEnd = indexOf(text, "\n", lineStart) + + if lineEnd == -1 { + lineEnd = len(text) - 1 + } + + line := text[lineStart : lineEnd+1] + lineStart = lineEnd + 1 + lineValue, ok := lineHash[line] + + if ok { + runes = append(runes, rune(lineValue)) + } else { + *lineArray = append(*lineArray, line) + lineHash[line] = len(*lineArray) - 1 + runes = append(runes, rune(len(*lineArray)-1)) + } + } + + return runes +} + +// DiffCharsToLines rehydrates the text in a diff from a string of line hashes to real lines of text. +func (dmp *DiffMatchPatch) DiffCharsToLines(diffs []Diff, lineArray []string) []Diff { + hydrated := make([]Diff, 0, len(diffs)) + for _, aDiff := range diffs { + chars := aDiff.Text + text := make([]string, len(chars)) + + for i, r := range chars { + text[i] = lineArray[r] + } + + aDiff.Text = strings.Join(text, "") + hydrated = append(hydrated, aDiff) + } + return hydrated +} + +// DiffCommonPrefix determines the common prefix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonPrefix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. + return commonPrefixLength([]rune(text1), []rune(text2)) +} + +// DiffCommonSuffix determines the common suffix length of two strings. +func (dmp *DiffMatchPatch) DiffCommonSuffix(text1, text2 string) int { + // Unused in this code, but retained for interface compatibility. 
+ return commonSuffixLength([]rune(text1), []rune(text2)) +} + +// commonPrefixLength returns the length of the common prefix of two rune slices. +func commonPrefixLength(text1, text2 []rune) int { + short, long := text1, text2 + if len(short) > len(long) { + short, long = long, short + } + for i, r := range short { + if r != long[i] { + return i + } + } + return len(short) +} + +// commonSuffixLength returns the length of the common suffix of two rune slices. +func commonSuffixLength(text1, text2 []rune) int { + n := min(len(text1), len(text2)) + for i := 0; i < n; i++ { + if text1[len(text1)-i-1] != text2[len(text2)-i-1] { + return i + } + } + return n + + // TODO research and benchmark this, why is it not activated? https://github.com/sergi/go-diff/issues/54 + // Binary search. + // Performance analysis: http://neil.fraser.name/news/2007/10/09/ + /* + pointermin := 0 + pointermax := math.Min(len(text1), len(text2)) + pointermid := pointermax + pointerend := 0 + for pointermin < pointermid { + if text1[len(text1)-pointermid:len(text1)-pointerend] == + text2[len(text2)-pointermid:len(text2)-pointerend] { + pointermin = pointermid + pointerend = pointermin + } else { + pointermax = pointermid + } + pointermid = math.Floor((pointermax-pointermin)/2 + pointermin) + } + return pointermid + */ +} + +// DiffCommonOverlap determines if the suffix of one string is the prefix of another. +func (dmp *DiffMatchPatch) DiffCommonOverlap(text1 string, text2 string) int { + // Cache the text lengths to prevent multiple calls. + text1Length := len(text1) + text2Length := len(text2) + // Eliminate the null case. + if text1Length == 0 || text2Length == 0 { + return 0 + } + // Truncate the longer string. + if text1Length > text2Length { + text1 = text1[text1Length-text2Length:] + } else if text1Length < text2Length { + text2 = text2[0:text1Length] + } + textLength := int(math.Min(float64(text1Length), float64(text2Length))) + // Quick check for the worst case. + if text1 == text2 { + return textLength + } + + // Start by looking for a single character match and increase length until no match is found. Performance analysis: http://neil.fraser.name/news/2010/11/04/ + best := 0 + length := 1 + for { + pattern := text1[textLength-length:] + found := strings.Index(text2, pattern) + if found == -1 { + break + } + length += found + if found == 0 || text1[textLength-length:] == text2[0:length] { + best = length + length++ + } + } + + return best +} + +// DiffHalfMatch checks whether the two texts share a substring which is at least half the length of the longer text. This speedup can produce non-minimal diffs. +func (dmp *DiffMatchPatch) DiffHalfMatch(text1, text2 string) []string { + // Unused in this code, but retained for interface compatibility. + runeSlices := dmp.diffHalfMatch([]rune(text1), []rune(text2)) + if runeSlices == nil { + return nil + } + + result := make([]string, len(runeSlices)) + for i, r := range runeSlices { + result[i] = string(r) + } + return result +} + +func (dmp *DiffMatchPatch) diffHalfMatch(text1, text2 []rune) [][]rune { + if dmp.DiffTimeout <= 0 { + // Don't risk returning a non-optimal diff if we have unlimited time. + return nil + } + + var longtext, shorttext []rune + if len(text1) > len(text2) { + longtext = text1 + shorttext = text2 + } else { + longtext = text2 + shorttext = text1 + } + + if len(longtext) < 4 || len(shorttext)*2 < len(longtext) { + return nil // Pointless. + } + + // First check if the second quarter is the seed for a half-match. 
+ hm1 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+3)/4)) + + // Check again based on the third quarter. + hm2 := dmp.diffHalfMatchI(longtext, shorttext, int(float64(len(longtext)+1)/2)) + + hm := [][]rune{} + if hm1 == nil && hm2 == nil { + return nil + } else if hm2 == nil { + hm = hm1 + } else if hm1 == nil { + hm = hm2 + } else { + // Both matched. Select the longest. + if len(hm1[4]) > len(hm2[4]) { + hm = hm1 + } else { + hm = hm2 + } + } + + // A half-match was found, sort out the return data. + if len(text1) > len(text2) { + return hm + } + + return [][]rune{hm[2], hm[3], hm[0], hm[1], hm[4]} +} + +// diffHalfMatchI checks if a substring of shorttext exist within longtext such that the substring is at least half the length of longtext? +// Returns a slice containing the prefix of longtext, the suffix of longtext, the prefix of shorttext, the suffix of shorttext and the common middle, or null if there was no match. +func (dmp *DiffMatchPatch) diffHalfMatchI(l, s []rune, i int) [][]rune { + var bestCommonA []rune + var bestCommonB []rune + var bestCommonLen int + var bestLongtextA []rune + var bestLongtextB []rune + var bestShorttextA []rune + var bestShorttextB []rune + + // Start with a 1/4 length substring at position i as a seed. + seed := l[i : i+len(l)/4] + + for j := runesIndexOf(s, seed, 0); j != -1; j = runesIndexOf(s, seed, j+1) { + prefixLength := commonPrefixLength(l[i:], s[j:]) + suffixLength := commonSuffixLength(l[:i], s[:j]) + + if bestCommonLen < suffixLength+prefixLength { + bestCommonA = s[j-suffixLength : j] + bestCommonB = s[j : j+prefixLength] + bestCommonLen = len(bestCommonA) + len(bestCommonB) + bestLongtextA = l[:i-suffixLength] + bestLongtextB = l[i+prefixLength:] + bestShorttextA = s[:j-suffixLength] + bestShorttextB = s[j+prefixLength:] + } + } + + if bestCommonLen*2 < len(l) { + return nil + } + + return [][]rune{ + bestLongtextA, + bestLongtextB, + bestShorttextA, + bestShorttextB, + append(bestCommonA, bestCommonB...), + } +} + +// DiffCleanupSemantic reduces the number of edits by eliminating semantically trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupSemantic(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + + var lastequality string + // Always equal to diffs[equalities[equalitiesLength - 1]][1] + var pointer int // Index of current position. + // Number of characters that changed prior to the equality. + var lengthInsertions1, lengthDeletions1 int + // Number of characters that changed after the equality. + var lengthInsertions2, lengthDeletions2 int + + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { + // Equality found. + + equalities = &equality{ + data: pointer, + next: equalities, + } + lengthInsertions1 = lengthInsertions2 + lengthDeletions1 = lengthDeletions2 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = diffs[pointer].Text + } else { + // An insertion or deletion. + + if diffs[pointer].Type == DiffInsert { + lengthInsertions2 += len(diffs[pointer].Text) + } else { + lengthDeletions2 += len(diffs[pointer].Text) + } + // Eliminate an equality that is smaller or equal to the edits on both sides of it. 
+ difference1 := int(math.Max(float64(lengthInsertions1), float64(lengthDeletions1))) + difference2 := int(math.Max(float64(lengthInsertions2), float64(lengthDeletions2))) + if len(lastequality) > 0 && + (len(lastequality) <= difference1) && + (len(lastequality) <= difference2) { + // Duplicate record. + insPoint := equalities.data + diffs = append( + diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + + lengthInsertions1 = 0 // Reset the counters. + lengthDeletions1 = 0 + lengthInsertions2 = 0 + lengthDeletions2 = 0 + lastequality = "" + changes = true + } + } + pointer++ + } + + // Normalize the diff. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + diffs = dmp.DiffCleanupSemanticLossless(diffs) + // Find any overlaps between deletions and insertions. + // e.g: <del>abcxxx</del><ins>xxxdef</ins> + // -> <del>abc</del>xxx<ins>def</ins> + // e.g: <del>xxxabc</del><ins>defxxx</ins> + // -> <ins>def</ins>xxx<del>abc</del> + // Only extract an overlap if it is as big as the edit ahead or behind it. + pointer = 1 + for pointer < len(diffs) { + if diffs[pointer-1].Type == DiffDelete && + diffs[pointer].Type == DiffInsert { + deletion := diffs[pointer-1].Text + insertion := diffs[pointer].Text + overlapLength1 := dmp.DiffCommonOverlap(deletion, insertion) + overlapLength2 := dmp.DiffCommonOverlap(insertion, deletion) + if overlapLength1 >= overlapLength2 { + if float64(overlapLength1) >= float64(len(deletion))/2 || + float64(overlapLength1) >= float64(len(insertion))/2 { + + // Overlap found. Insert an equality and trim the surrounding edits. + diffs = append( + diffs[:pointer], + append([]Diff{Diff{DiffEqual, insertion[:overlapLength1]}}, diffs[pointer:]...)...) + + diffs[pointer-1].Text = + deletion[0 : len(deletion)-overlapLength1] + diffs[pointer+1].Text = insertion[overlapLength1:] + pointer++ + } + } else { + if float64(overlapLength2) >= float64(len(deletion))/2 || + float64(overlapLength2) >= float64(len(insertion))/2 { + // Reverse overlap found. Insert an equality and swap and trim the surrounding edits. + overlap := Diff{DiffEqual, deletion[:overlapLength2]} + diffs = append( + diffs[:pointer], + append([]Diff{overlap}, diffs[pointer:]...)...) + + diffs[pointer-1].Type = DiffInsert + diffs[pointer-1].Text = insertion[0 : len(insertion)-overlapLength2] + diffs[pointer+1].Type = DiffDelete + diffs[pointer+1].Text = deletion[overlapLength2:] + pointer++ + } + } + pointer++ + } + pointer++ + } + + return diffs +} + +// Define some regex patterns for matching boundaries. +var ( + nonAlphaNumericRegex = regexp.MustCompile(`[^a-zA-Z0-9]`) + whitespaceRegex = regexp.MustCompile(`\s`) + linebreakRegex = regexp.MustCompile(`[\r\n]`) + blanklineEndRegex = regexp.MustCompile(`\n\r?\n$`) + blanklineStartRegex = regexp.MustCompile(`^\r?\n\r?\n`) +) + +// diffCleanupSemanticScore computes a score representing whether the internal boundary falls on logical boundaries. +// Scores range from 6 (best) to 0 (worst). Closure, but does not reference any external variables. +func diffCleanupSemanticScore(one, two string) int { + if len(one) == 0 || len(two) == 0 { + // Edges are the best. 
+ return 6 + } + + // Each port of this function behaves slightly differently due to subtle differences in each language's definition of things like 'whitespace'. Since this function's purpose is largely cosmetic, the choice has been made to use each language's native features rather than force total conformity. + rune1, _ := utf8.DecodeLastRuneInString(one) + rune2, _ := utf8.DecodeRuneInString(two) + char1 := string(rune1) + char2 := string(rune2) + + nonAlphaNumeric1 := nonAlphaNumericRegex.MatchString(char1) + nonAlphaNumeric2 := nonAlphaNumericRegex.MatchString(char2) + whitespace1 := nonAlphaNumeric1 && whitespaceRegex.MatchString(char1) + whitespace2 := nonAlphaNumeric2 && whitespaceRegex.MatchString(char2) + lineBreak1 := whitespace1 && linebreakRegex.MatchString(char1) + lineBreak2 := whitespace2 && linebreakRegex.MatchString(char2) + blankLine1 := lineBreak1 && blanklineEndRegex.MatchString(one) + blankLine2 := lineBreak2 && blanklineEndRegex.MatchString(two) + + if blankLine1 || blankLine2 { + // Five points for blank lines. + return 5 + } else if lineBreak1 || lineBreak2 { + // Four points for line breaks. + return 4 + } else if nonAlphaNumeric1 && !whitespace1 && whitespace2 { + // Three points for end of sentences. + return 3 + } else if whitespace1 || whitespace2 { + // Two points for whitespace. + return 2 + } else if nonAlphaNumeric1 || nonAlphaNumeric2 { + // One point for non-alphanumeric. + return 1 + } + return 0 +} + +// DiffCleanupSemanticLossless looks for single edits surrounded on both sides by equalities which can be shifted sideways to align the edit to a word boundary. +// E.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came. +func (dmp *DiffMatchPatch) DiffCleanupSemanticLossless(diffs []Diff) []Diff { + pointer := 1 + + // Intentionally ignore the first and last element (don't need checking). + for pointer < len(diffs)-1 { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + + // This is a single edit surrounded by equalities. + equality1 := diffs[pointer-1].Text + edit := diffs[pointer].Text + equality2 := diffs[pointer+1].Text + + // First, shift the edit as far left as possible. + commonOffset := dmp.DiffCommonSuffix(equality1, edit) + if commonOffset > 0 { + commonString := edit[len(edit)-commonOffset:] + equality1 = equality1[0 : len(equality1)-commonOffset] + edit = commonString + edit[:len(edit)-commonOffset] + equality2 = commonString + equality2 + } + + // Second, step character by character right, looking for the best fit. + bestEquality1 := equality1 + bestEdit := edit + bestEquality2 := equality2 + bestScore := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + + for len(edit) != 0 && len(equality2) != 0 { + _, sz := utf8.DecodeRuneInString(edit) + if len(equality2) < sz || edit[:sz] != equality2[:sz] { + break + } + equality1 += edit[:sz] + edit = edit[sz:] + equality2[:sz] + equality2 = equality2[sz:] + score := diffCleanupSemanticScore(equality1, edit) + + diffCleanupSemanticScore(edit, equality2) + // The >= encourages trailing rather than leading whitespace on edits. + if score >= bestScore { + bestScore = score + bestEquality1 = equality1 + bestEdit = edit + bestEquality2 = equality2 + } + } + + if diffs[pointer-1].Text != bestEquality1 { + // We have an improvement, save it back to the diff. 
+ if len(bestEquality1) != 0 { + diffs[pointer-1].Text = bestEquality1 + } else { + diffs = splice(diffs, pointer-1, 1) + pointer-- + } + + diffs[pointer].Text = bestEdit + if len(bestEquality2) != 0 { + diffs[pointer+1].Text = bestEquality2 + } else { + diffs = append(diffs[:pointer+1], diffs[pointer+2:]...) + pointer-- + } + } + } + pointer++ + } + + return diffs +} + +// DiffCleanupEfficiency reduces the number of edits by eliminating operationally trivial equalities. +func (dmp *DiffMatchPatch) DiffCleanupEfficiency(diffs []Diff) []Diff { + changes := false + // Stack of indices where equalities are found. + type equality struct { + data int + next *equality + } + var equalities *equality + // Always equal to equalities[equalitiesLength-1][1] + lastequality := "" + pointer := 0 // Index of current position. + // Is there an insertion operation before the last equality. + preIns := false + // Is there a deletion operation before the last equality. + preDel := false + // Is there an insertion operation after the last equality. + postIns := false + // Is there a deletion operation after the last equality. + postDel := false + for pointer < len(diffs) { + if diffs[pointer].Type == DiffEqual { // Equality found. + if len(diffs[pointer].Text) < dmp.DiffEditCost && + (postIns || postDel) { + // Candidate found. + equalities = &equality{ + data: pointer, + next: equalities, + } + preIns = postIns + preDel = postDel + lastequality = diffs[pointer].Text + } else { + // Not a candidate, and can never become one. + equalities = nil + lastequality = "" + } + postIns = false + postDel = false + } else { // An insertion or deletion. + if diffs[pointer].Type == DiffDelete { + postDel = true + } else { + postIns = true + } + + // Five types to be split: + // <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del> + // <ins>A</ins>X<ins>C</ins><del>D</del> + // <ins>A</ins><del>B</del>X<ins>C</ins> + // <ins>A</del>X<ins>C</ins><del>D</del> + // <ins>A</ins><del>B</del>X<del>C</del> + var sumPres int + if preIns { + sumPres++ + } + if preDel { + sumPres++ + } + if postIns { + sumPres++ + } + if postDel { + sumPres++ + } + if len(lastequality) > 0 && + ((preIns && preDel && postIns && postDel) || + ((len(lastequality) < dmp.DiffEditCost/2) && sumPres == 3)) { + + insPoint := equalities.data + + // Duplicate record. + diffs = append(diffs[:insPoint], + append([]Diff{Diff{DiffDelete, lastequality}}, diffs[insPoint:]...)...) + + // Change second copy to insert. + diffs[insPoint+1].Type = DiffInsert + // Throw away the equality we just deleted. + equalities = equalities.next + lastequality = "" + + if preIns && preDel { + // No changes made which could affect previous entry, keep going. + postIns = true + postDel = true + equalities = nil + } else { + if equalities != nil { + equalities = equalities.next + } + if equalities != nil { + pointer = equalities.data + } else { + pointer = -1 + } + postIns = false + postDel = false + } + changes = true + } + } + pointer++ + } + + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffCleanupMerge reorders and merges like edit sections. Merge equalities. +// Any edit section can move as long as it doesn't cross an equality. +func (dmp *DiffMatchPatch) DiffCleanupMerge(diffs []Diff) []Diff { + // Add a dummy entry at the end. 
+ diffs = append(diffs, Diff{DiffEqual, ""}) + pointer := 0 + countDelete := 0 + countInsert := 0 + commonlength := 0 + textDelete := []rune(nil) + textInsert := []rune(nil) + + for pointer < len(diffs) { + switch diffs[pointer].Type { + case DiffInsert: + countInsert++ + textInsert = append(textInsert, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffDelete: + countDelete++ + textDelete = append(textDelete, []rune(diffs[pointer].Text)...) + pointer++ + break + case DiffEqual: + // Upon reaching an equality, check for prior redundancies. + if countDelete+countInsert > 1 { + if countDelete != 0 && countInsert != 0 { + // Factor out any common prefixies. + commonlength = commonPrefixLength(textInsert, textDelete) + if commonlength != 0 { + x := pointer - countDelete - countInsert + if x > 0 && diffs[x-1].Type == DiffEqual { + diffs[x-1].Text += string(textInsert[:commonlength]) + } else { + diffs = append([]Diff{Diff{DiffEqual, string(textInsert[:commonlength])}}, diffs...) + pointer++ + } + textInsert = textInsert[commonlength:] + textDelete = textDelete[commonlength:] + } + // Factor out any common suffixies. + commonlength = commonSuffixLength(textInsert, textDelete) + if commonlength != 0 { + insertIndex := len(textInsert) - commonlength + deleteIndex := len(textDelete) - commonlength + diffs[pointer].Text = string(textInsert[insertIndex:]) + diffs[pointer].Text + textInsert = textInsert[:insertIndex] + textDelete = textDelete[:deleteIndex] + } + } + // Delete the offending records and add the merged ones. + if countDelete == 0 { + diffs = splice(diffs, pointer-countInsert, + countDelete+countInsert, + Diff{DiffInsert, string(textInsert)}) + } else if countInsert == 0 { + diffs = splice(diffs, pointer-countDelete, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}) + } else { + diffs = splice(diffs, pointer-countDelete-countInsert, + countDelete+countInsert, + Diff{DiffDelete, string(textDelete)}, + Diff{DiffInsert, string(textInsert)}) + } + + pointer = pointer - countDelete - countInsert + 1 + if countDelete != 0 { + pointer++ + } + if countInsert != 0 { + pointer++ + } + } else if pointer != 0 && diffs[pointer-1].Type == DiffEqual { + // Merge this equality with the previous one. + diffs[pointer-1].Text += diffs[pointer].Text + diffs = append(diffs[:pointer], diffs[pointer+1:]...) + } else { + pointer++ + } + countInsert = 0 + countDelete = 0 + textDelete = nil + textInsert = nil + break + } + } + + if len(diffs[len(diffs)-1].Text) == 0 { + diffs = diffs[0 : len(diffs)-1] // Remove the dummy entry at the end. + } + + // Second pass: look for single edits surrounded on both sides by equalities which can be shifted sideways to eliminate an equality. E.g: A<ins>BA</ins>C -> <ins>AB</ins>AC + changes := false + pointer = 1 + // Intentionally ignore the first and last element (don't need checking). + for pointer < (len(diffs) - 1) { + if diffs[pointer-1].Type == DiffEqual && + diffs[pointer+1].Type == DiffEqual { + // This is a single edit surrounded by equalities. + if strings.HasSuffix(diffs[pointer].Text, diffs[pointer-1].Text) { + // Shift the edit over the previous equality. + diffs[pointer].Text = diffs[pointer-1].Text + + diffs[pointer].Text[:len(diffs[pointer].Text)-len(diffs[pointer-1].Text)] + diffs[pointer+1].Text = diffs[pointer-1].Text + diffs[pointer+1].Text + diffs = splice(diffs, pointer-1, 1) + changes = true + } else if strings.HasPrefix(diffs[pointer].Text, diffs[pointer+1].Text) { + // Shift the edit over the next equality. 
+ diffs[pointer-1].Text += diffs[pointer+1].Text + diffs[pointer].Text = + diffs[pointer].Text[len(diffs[pointer+1].Text):] + diffs[pointer+1].Text + diffs = splice(diffs, pointer+1, 1) + changes = true + } + } + pointer++ + } + + // If shifts were made, the diff needs reordering and another shift sweep. + if changes { + diffs = dmp.DiffCleanupMerge(diffs) + } + + return diffs +} + +// DiffXIndex returns the equivalent location in s2. +func (dmp *DiffMatchPatch) DiffXIndex(diffs []Diff, loc int) int { + chars1 := 0 + chars2 := 0 + lastChars1 := 0 + lastChars2 := 0 + lastDiff := Diff{} + for i := 0; i < len(diffs); i++ { + aDiff := diffs[i] + if aDiff.Type != DiffInsert { + // Equality or deletion. + chars1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + // Equality or insertion. + chars2 += len(aDiff.Text) + } + if chars1 > loc { + // Overshot the location. + lastDiff = aDiff + break + } + lastChars1 = chars1 + lastChars2 = chars2 + } + if lastDiff.Type == DiffDelete { + // The location was deleted. + return lastChars2 + } + // Add the remaining character length. + return lastChars2 + (loc - lastChars1) +} + +// DiffPrettyHtml converts a []Diff into a pretty HTML report. +// It is intended as an example from which to write one's own display functions. +func (dmp *DiffMatchPatch) DiffPrettyHtml(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := strings.Replace(html.EscapeString(diff.Text), "\n", "¶<br>", -1) + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("<ins style=\"background:#e6ffe6;\">") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("</ins>") + case DiffDelete: + _, _ = buff.WriteString("<del style=\"background:#ffe6e6;\">") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("</del>") + case DiffEqual: + _, _ = buff.WriteString("<span>") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("</span>") + } + } + return buff.String() +} + +// DiffPrettyText converts a []Diff into a colored text report. +func (dmp *DiffMatchPatch) DiffPrettyText(diffs []Diff) string { + var buff bytes.Buffer + for _, diff := range diffs { + text := diff.Text + + switch diff.Type { + case DiffInsert: + _, _ = buff.WriteString("\x1b[32m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffDelete: + _, _ = buff.WriteString("\x1b[31m") + _, _ = buff.WriteString(text) + _, _ = buff.WriteString("\x1b[0m") + case DiffEqual: + _, _ = buff.WriteString(text) + } + } + + return buff.String() +} + +// DiffText1 computes and returns the source text (all equalities and deletions). +func (dmp *DiffMatchPatch) DiffText1(diffs []Diff) string { + //StringBuilder text = new StringBuilder() + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffInsert { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffText2 computes and returns the destination text (all equalities and insertions). +func (dmp *DiffMatchPatch) DiffText2(diffs []Diff) string { + var text bytes.Buffer + + for _, aDiff := range diffs { + if aDiff.Type != DiffDelete { + _, _ = text.WriteString(aDiff.Text) + } + } + return text.String() +} + +// DiffLevenshtein computes the Levenshtein distance that is the number of inserted, deleted or substituted characters. 
+func (dmp *DiffMatchPatch) DiffLevenshtein(diffs []Diff) int { + levenshtein := 0 + insertions := 0 + deletions := 0 + + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + insertions += len(aDiff.Text) + case DiffDelete: + deletions += len(aDiff.Text) + case DiffEqual: + // A deletion and an insertion is one substitution. + levenshtein += max(insertions, deletions) + insertions = 0 + deletions = 0 + } + } + + levenshtein += max(insertions, deletions) + return levenshtein +} + +// DiffToDelta crushes the diff into an encoded string which describes the operations required to transform text1 into text2. +// E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'. Operations are tab-separated. Inserted text is escaped using %xx notation. +func (dmp *DiffMatchPatch) DiffToDelta(diffs []Diff) string { + var text bytes.Buffer + for _, aDiff := range diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\t") + break + case DiffDelete: + _, _ = text.WriteString("-") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + case DiffEqual: + _, _ = text.WriteString("=") + _, _ = text.WriteString(strconv.Itoa(utf8.RuneCountInString(aDiff.Text))) + _, _ = text.WriteString("\t") + break + } + } + delta := text.String() + if len(delta) != 0 { + // Strip off trailing tab character. + delta = delta[0 : utf8.RuneCountInString(delta)-1] + delta = unescaper.Replace(delta) + } + return delta +} + +// DiffFromDelta given the original text1, and an encoded string which describes the operations required to transform text1 into text2, comAdde the full diff. +func (dmp *DiffMatchPatch) DiffFromDelta(text1 string, delta string) (diffs []Diff, err error) { + i := 0 + runes := []rune(text1) + + for _, token := range strings.Split(delta, "\t") { + if len(token) == 0 { + // Blank tokens are ok (from a trailing \t). + continue + } + + // Each token begins with a one character parameter which specifies the operation of this token (delete, insert, equality). + param := token[1:] + + switch op := token[0]; op { + case '+': + // Decode would Diff all "+" to " " + param = strings.Replace(param, "+", "%2b", -1) + param, err = url.QueryUnescape(param) + if err != nil { + return nil, err + } + if !utf8.ValidString(param) { + return nil, fmt.Errorf("invalid UTF-8 token: %q", param) + } + + diffs = append(diffs, Diff{DiffInsert, param}) + case '=', '-': + n, err := strconv.ParseInt(param, 10, 0) + if err != nil { + return nil, err + } else if n < 0 { + return nil, errors.New("Negative number in DiffFromDelta: " + param) + } + + i += int(n) + // Break out if we are out of bounds, go1.6 can't handle this very well + if i > len(runes) { + break + } + // Remember that string slicing is by byte - we want by rune here. + text := string(runes[i-int(n) : i]) + + if op == '=' { + diffs = append(diffs, Diff{DiffEqual, text}) + } else { + diffs = append(diffs, Diff{DiffDelete, text}) + } + default: + // Anything else is an error. 
+ return nil, errors.New("Invalid diff operation in DiffFromDelta: " + string(token[0])) + } + } + + if i != len(runes) { + return nil, fmt.Errorf("Delta length (%v) is different from source text length (%v)", i, len(text1)) + } + + return diffs, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go new file mode 100644 index 00000000..d3acc32c --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go @@ -0,0 +1,46 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +// Package diffmatchpatch offers robust algorithms to perform the operations required for synchronizing plain text. +package diffmatchpatch + +import ( + "time" +) + +// DiffMatchPatch holds the configuration for diff-match-patch operations. +type DiffMatchPatch struct { + // Number of seconds to map a diff before giving up (0 for infinity). + DiffTimeout time.Duration + // Cost of an empty edit operation in terms of edit characters. + DiffEditCost int + // How far to search for a match (0 = exact location, 1000+ = broad match). A match this many characters away from the expected location will add 1.0 to the score (0.0 is a perfect match). + MatchDistance int + // When deleting a large block of text (over ~64 characters), how close do the contents have to be to match the expected contents. (0.0 = perfection, 1.0 = very loose). Note that MatchThreshold controls how closely the end points of a delete need to match. + PatchDeleteThreshold float64 + // Chunk size for context length. + PatchMargin int + // The number of bits in an int. + MatchMaxBits int + // At what point is no match declared (0.0 = perfection, 1.0 = very loose). + MatchThreshold float64 +} + +// New creates a new DiffMatchPatch object with default parameters. +func New() *DiffMatchPatch { + // Defaults. + return &DiffMatchPatch{ + DiffTimeout: time.Second, + DiffEditCost: 4, + MatchThreshold: 0.5, + MatchDistance: 1000, + PatchDeleteThreshold: 0.5, + PatchMargin: 4, + MatchMaxBits: 32, + } +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go new file mode 100644 index 00000000..17374e10 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/match.go @@ -0,0 +1,160 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "math" +) + +// MatchMain locates the best instance of 'pattern' in 'text' near 'loc'. +// Returns -1 if no match found. +func (dmp *DiffMatchPatch) MatchMain(text, pattern string, loc int) int { + // Check for null inputs not needed since null can't be passed in C#. + + loc = int(math.Max(0, math.Min(float64(loc), float64(len(text))))) + if text == pattern { + // Shortcut (potentially not guaranteed by the algorithm) + return 0 + } else if len(text) == 0 { + // Nothing to match. 
+ return -1 + } else if loc+len(pattern) <= len(text) && text[loc:loc+len(pattern)] == pattern { + // Perfect match at the perfect spot! (Includes case of null pattern) + return loc + } + // Do a fuzzy compare. + return dmp.MatchBitap(text, pattern, loc) +} + +// MatchBitap locates the best instance of 'pattern' in 'text' near 'loc' using the Bitap algorithm. +// Returns -1 if no match was found. +func (dmp *DiffMatchPatch) MatchBitap(text, pattern string, loc int) int { + // Initialise the alphabet. + s := dmp.MatchAlphabet(pattern) + + // Highest score beyond which we give up. + scoreThreshold := dmp.MatchThreshold + // Is there a nearby exact match? (speedup) + bestLoc := indexOf(text, pattern, loc) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + // What about in the other direction? (speedup) + bestLoc = lastIndexOf(text, pattern, loc+len(pattern)) + if bestLoc != -1 { + scoreThreshold = math.Min(dmp.matchBitapScore(0, bestLoc, loc, + pattern), scoreThreshold) + } + } + + // Initialise the bit arrays. + matchmask := 1 << uint((len(pattern) - 1)) + bestLoc = -1 + + var binMin, binMid int + binMax := len(pattern) + len(text) + lastRd := []int{} + for d := 0; d < len(pattern); d++ { + // Scan for the best match; each iteration allows for one more error. Run a binary search to determine how far from 'loc' we can stray at this error level. + binMin = 0 + binMid = binMax + for binMin < binMid { + if dmp.matchBitapScore(d, loc+binMid, loc, pattern) <= scoreThreshold { + binMin = binMid + } else { + binMax = binMid + } + binMid = (binMax-binMin)/2 + binMin + } + // Use the result from this iteration as the maximum for the next. + binMax = binMid + start := int(math.Max(1, float64(loc-binMid+1))) + finish := int(math.Min(float64(loc+binMid), float64(len(text))) + float64(len(pattern))) + + rd := make([]int, finish+2) + rd[finish+1] = (1 << uint(d)) - 1 + + for j := finish; j >= start; j-- { + var charMatch int + if len(text) <= j-1 { + // Out of range. + charMatch = 0 + } else if _, ok := s[text[j-1]]; !ok { + charMatch = 0 + } else { + charMatch = s[text[j-1]] + } + + if d == 0 { + // First pass: exact match. + rd[j] = ((rd[j+1] << 1) | 1) & charMatch + } else { + // Subsequent passes: fuzzy match. + rd[j] = ((rd[j+1]<<1)|1)&charMatch | (((lastRd[j+1] | lastRd[j]) << 1) | 1) | lastRd[j+1] + } + if (rd[j] & matchmask) != 0 { + score := dmp.matchBitapScore(d, j-1, loc, pattern) + // This match will almost certainly be better than any existing match. But check anyway. + if score <= scoreThreshold { + // Told you so. + scoreThreshold = score + bestLoc = j - 1 + if bestLoc > loc { + // When passing loc, don't exceed our current distance from loc. + start = int(math.Max(1, float64(2*loc-bestLoc))) + } else { + // Already passed loc, downhill from here on in. + break + } + } + } + } + if dmp.matchBitapScore(d+1, loc, loc, pattern) > scoreThreshold { + // No hope for a (better) match at greater error levels. + break + } + lastRd = rd + } + return bestLoc +} + +// matchBitapScore computes and returns the score for a match with e errors and x location. +func (dmp *DiffMatchPatch) matchBitapScore(e, x, loc int, pattern string) float64 { + accuracy := float64(e) / float64(len(pattern)) + proximity := math.Abs(float64(loc - x)) + if dmp.MatchDistance == 0 { + // Dodge divide by zero error. 
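
Concretely, the score computed here is accuracy (errors divided by pattern length) plus a proximity penalty (distance from the expected location divided by `MatchDistance`): with the defaults, one error in an eight-character pattern found 100 characters away scores 1/8 + 100/1000 = 0.225, well under the default `MatchThreshold` of 0.5, so that match is kept. A minimal, illustrative call of the public entry point (the strings are made up):

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()
	// The pattern is misspelled but lies close to the expected location, so its
	// bitap score should stay under MatchThreshold and a location is returned.
	loc := dmp.MatchMain("The quick brown fox jumps over the lazy dog.", "quikc brwn", 4)
	fmt.Println(loc) // index of the best fuzzy match, or -1 if nothing scores well enough
}
```
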
+ if proximity == 0 { + return accuracy + } + + return 1.0 + } + return accuracy + (proximity / float64(dmp.MatchDistance)) +} + +// MatchAlphabet initialises the alphabet for the Bitap algorithm. +func (dmp *DiffMatchPatch) MatchAlphabet(pattern string) map[byte]int { + s := map[byte]int{} + charPattern := []byte(pattern) + for _, c := range charPattern { + _, ok := s[c] + if !ok { + s[c] = 0 + } + } + i := 0 + + for _, c := range charPattern { + value := s[c] | int(uint(1)<<uint((len(pattern)-i-1))) + s[c] = value + i++ + } + return s +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go new file mode 100644 index 00000000..aed242bc --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go @@ -0,0 +1,23 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +func min(x, y int) int { + if x < y { + return x + } + return y +} + +func max(x, y int) int { + if x > y { + return x + } + return y +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go new file mode 100644 index 00000000..116c0434 --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go @@ -0,0 +1,556 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "bytes" + "errors" + "math" + "net/url" + "regexp" + "strconv" + "strings" +) + +// Patch represents one patch operation. +type Patch struct { + diffs []Diff + start1 int + start2 int + length1 int + length2 int +} + +// String emulates GNU diff's format. +// Header: @@ -382,8 +481,9 @@ +// Indicies are printed as 1-based, not 0-based. +func (p *Patch) String() string { + var coords1, coords2 string + + if p.length1 == 0 { + coords1 = strconv.Itoa(p.start1) + ",0" + } else if p.length1 == 1 { + coords1 = strconv.Itoa(p.start1 + 1) + } else { + coords1 = strconv.Itoa(p.start1+1) + "," + strconv.Itoa(p.length1) + } + + if p.length2 == 0 { + coords2 = strconv.Itoa(p.start2) + ",0" + } else if p.length2 == 1 { + coords2 = strconv.Itoa(p.start2 + 1) + } else { + coords2 = strconv.Itoa(p.start2+1) + "," + strconv.Itoa(p.length2) + } + + var text bytes.Buffer + _, _ = text.WriteString("@@ -" + coords1 + " +" + coords2 + " @@\n") + + // Escape the body of the patch with %xx notation. + for _, aDiff := range p.diffs { + switch aDiff.Type { + case DiffInsert: + _, _ = text.WriteString("+") + case DiffDelete: + _, _ = text.WriteString("-") + case DiffEqual: + _, _ = text.WriteString(" ") + } + + _, _ = text.WriteString(strings.Replace(url.QueryEscape(aDiff.Text), "+", " ", -1)) + _, _ = text.WriteString("\n") + } + + return unescaper.Replace(text.String()) +} + +// PatchAddContext increases the context until it is unique, but doesn't let the pattern expand beyond MatchMaxBits. 
+func (dmp *DiffMatchPatch) PatchAddContext(patch Patch, text string) Patch { + if len(text) == 0 { + return patch + } + + pattern := text[patch.start2 : patch.start2+patch.length1] + padding := 0 + + // Look for the first and last matches of pattern in text. If two different matches are found, increase the pattern length. + for strings.Index(text, pattern) != strings.LastIndex(text, pattern) && + len(pattern) < dmp.MatchMaxBits-2*dmp.PatchMargin { + padding += dmp.PatchMargin + maxStart := max(0, patch.start2-padding) + minEnd := min(len(text), patch.start2+patch.length1+padding) + pattern = text[maxStart:minEnd] + } + // Add one chunk for good luck. + padding += dmp.PatchMargin + + // Add the prefix. + prefix := text[max(0, patch.start2-padding):patch.start2] + if len(prefix) != 0 { + patch.diffs = append([]Diff{Diff{DiffEqual, prefix}}, patch.diffs...) + } + // Add the suffix. + suffix := text[patch.start2+patch.length1 : min(len(text), patch.start2+patch.length1+padding)] + if len(suffix) != 0 { + patch.diffs = append(patch.diffs, Diff{DiffEqual, suffix}) + } + + // Roll back the start points. + patch.start1 -= len(prefix) + patch.start2 -= len(prefix) + // Extend the lengths. + patch.length1 += len(prefix) + len(suffix) + patch.length2 += len(prefix) + len(suffix) + + return patch +} + +// PatchMake computes a list of patches. +func (dmp *DiffMatchPatch) PatchMake(opt ...interface{}) []Patch { + if len(opt) == 1 { + diffs, _ := opt[0].([]Diff) + text1 := dmp.DiffText1(diffs) + return dmp.PatchMake(text1, diffs) + } else if len(opt) == 2 { + text1 := opt[0].(string) + switch t := opt[1].(type) { + case string: + diffs := dmp.DiffMain(text1, t, true) + if len(diffs) > 2 { + diffs = dmp.DiffCleanupSemantic(diffs) + diffs = dmp.DiffCleanupEfficiency(diffs) + } + return dmp.PatchMake(text1, diffs) + case []Diff: + return dmp.patchMake2(text1, t) + } + } else if len(opt) == 3 { + return dmp.PatchMake(opt[0], opt[2]) + } + return []Patch{} +} + +// patchMake2 computes a list of patches to turn text1 into text2. +// text2 is not provided, diffs are the delta between text1 and text2. +func (dmp *DiffMatchPatch) patchMake2(text1 string, diffs []Diff) []Patch { + // Check for null inputs not needed since null can't be passed in C#. + patches := []Patch{} + if len(diffs) == 0 { + return patches // Get rid of the null case. + } + + patch := Patch{} + charCount1 := 0 // Number of characters into the text1 string. + charCount2 := 0 // Number of characters into the text2 string. + // Start with text1 (prepatchText) and apply the diffs until we arrive at text2 (postpatchText). We recreate the patches one by one to determine context info. + prepatchText := text1 + postpatchText := text1 + + for i, aDiff := range diffs { + if len(patch.diffs) == 0 && aDiff.Type != DiffEqual { + // A new patch starts here. + patch.start1 = charCount1 + patch.start2 = charCount2 + } + + switch aDiff.Type { + case DiffInsert: + patch.diffs = append(patch.diffs, aDiff) + patch.length2 += len(aDiff.Text) + postpatchText = postpatchText[:charCount2] + + aDiff.Text + postpatchText[charCount2:] + case DiffDelete: + patch.length1 += len(aDiff.Text) + patch.diffs = append(patch.diffs, aDiff) + postpatchText = postpatchText[:charCount2] + postpatchText[charCount2+len(aDiff.Text):] + case DiffEqual: + if len(aDiff.Text) <= 2*dmp.PatchMargin && + len(patch.diffs) != 0 && i != len(diffs)-1 { + // Small equality inside a patch. 
+ patch.diffs = append(patch.diffs, aDiff) + patch.length1 += len(aDiff.Text) + patch.length2 += len(aDiff.Text) + } + if len(aDiff.Text) >= 2*dmp.PatchMargin { + // Time for a new patch. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + patch = Patch{} + // Unlike Unidiff, our patch lists have a rolling context. http://code.google.com/p/google-diff-match-patch/wiki/Unidiff Update prepatch text & pos to reflect the application of the just completed patch. + prepatchText = postpatchText + charCount1 = charCount2 + } + } + } + + // Update the current character count. + if aDiff.Type != DiffInsert { + charCount1 += len(aDiff.Text) + } + if aDiff.Type != DiffDelete { + charCount2 += len(aDiff.Text) + } + } + + // Pick up the leftover patch if not empty. + if len(patch.diffs) != 0 { + patch = dmp.PatchAddContext(patch, prepatchText) + patches = append(patches, patch) + } + + return patches +} + +// PatchDeepCopy returns an array that is identical to a given an array of patches. +func (dmp *DiffMatchPatch) PatchDeepCopy(patches []Patch) []Patch { + patchesCopy := []Patch{} + for _, aPatch := range patches { + patchCopy := Patch{} + for _, aDiff := range aPatch.diffs { + patchCopy.diffs = append(patchCopy.diffs, Diff{ + aDiff.Type, + aDiff.Text, + }) + } + patchCopy.start1 = aPatch.start1 + patchCopy.start2 = aPatch.start2 + patchCopy.length1 = aPatch.length1 + patchCopy.length2 = aPatch.length2 + patchesCopy = append(patchesCopy, patchCopy) + } + return patchesCopy +} + +// PatchApply merges a set of patches onto the text. Returns a patched text, as well as an array of true/false values indicating which patches were applied. +func (dmp *DiffMatchPatch) PatchApply(patches []Patch, text string) (string, []bool) { + if len(patches) == 0 { + return text, []bool{} + } + + // Deep copy the patches so that no changes are made to originals. + patches = dmp.PatchDeepCopy(patches) + + nullPadding := dmp.PatchAddPadding(patches) + text = nullPadding + text + nullPadding + patches = dmp.PatchSplitMax(patches) + + x := 0 + // delta keeps track of the offset between the expected and actual location of the previous patch. If there are patches expected at positions 10 and 20, but the first patch was found at 12, delta is 2 and the second patch has an effective expected position of 22. + delta := 0 + results := make([]bool, len(patches)) + for _, aPatch := range patches { + expectedLoc := aPatch.start2 + delta + text1 := dmp.DiffText1(aPatch.diffs) + var startLoc int + endLoc := -1 + if len(text1) > dmp.MatchMaxBits { + // PatchSplitMax will only provide an oversized pattern in the case of a monster delete. + startLoc = dmp.MatchMain(text, text1[:dmp.MatchMaxBits], expectedLoc) + if startLoc != -1 { + endLoc = dmp.MatchMain(text, + text1[len(text1)-dmp.MatchMaxBits:], expectedLoc+len(text1)-dmp.MatchMaxBits) + if endLoc == -1 || startLoc >= endLoc { + // Can't find valid trailing context. Drop this patch. + startLoc = -1 + } + } + } else { + startLoc = dmp.MatchMain(text, text1, expectedLoc) + } + if startLoc == -1 { + // No match found. :( + results[x] = false + // Subtract the delta for this failed patch from subsequent patches. + delta -= aPatch.length2 - aPatch.length1 + } else { + // Found a match. 
:) + results[x] = true + delta = startLoc - expectedLoc + var text2 string + if endLoc == -1 { + text2 = text[startLoc:int(math.Min(float64(startLoc+len(text1)), float64(len(text))))] + } else { + text2 = text[startLoc:int(math.Min(float64(endLoc+dmp.MatchMaxBits), float64(len(text))))] + } + if text1 == text2 { + // Perfect match, just shove the Replacement text in. + text = text[:startLoc] + dmp.DiffText2(aPatch.diffs) + text[startLoc+len(text1):] + } else { + // Imperfect match. Run a diff to get a framework of equivalent indices. + diffs := dmp.DiffMain(text1, text2, false) + if len(text1) > dmp.MatchMaxBits && float64(dmp.DiffLevenshtein(diffs))/float64(len(text1)) > dmp.PatchDeleteThreshold { + // The end points match, but the content is unacceptably bad. + results[x] = false + } else { + diffs = dmp.DiffCleanupSemanticLossless(diffs) + index1 := 0 + for _, aDiff := range aPatch.diffs { + if aDiff.Type != DiffEqual { + index2 := dmp.DiffXIndex(diffs, index1) + if aDiff.Type == DiffInsert { + // Insertion + text = text[:startLoc+index2] + aDiff.Text + text[startLoc+index2:] + } else if aDiff.Type == DiffDelete { + // Deletion + startIndex := startLoc + index2 + text = text[:startIndex] + + text[startIndex+dmp.DiffXIndex(diffs, index1+len(aDiff.Text))-index2:] + } + } + if aDiff.Type != DiffDelete { + index1 += len(aDiff.Text) + } + } + } + } + } + x++ + } + // Strip the padding off. + text = text[len(nullPadding) : len(nullPadding)+(len(text)-2*len(nullPadding))] + return text, results +} + +// PatchAddPadding adds some padding on text start and end so that edges can match something. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchAddPadding(patches []Patch) string { + paddingLength := dmp.PatchMargin + nullPadding := "" + for x := 1; x <= paddingLength; x++ { + nullPadding += string(x) + } + + // Bump all the patches forward. + for i := range patches { + patches[i].start1 += paddingLength + patches[i].start2 += paddingLength + } + + // Add some padding on start of first diff. + if len(patches[0].diffs) == 0 || patches[0].diffs[0].Type != DiffEqual { + // Add nullPadding equality. + patches[0].diffs = append([]Diff{Diff{DiffEqual, nullPadding}}, patches[0].diffs...) + patches[0].start1 -= paddingLength // Should be 0. + patches[0].start2 -= paddingLength // Should be 0. + patches[0].length1 += paddingLength + patches[0].length2 += paddingLength + } else if paddingLength > len(patches[0].diffs[0].Text) { + // Grow first equality. + extraLength := paddingLength - len(patches[0].diffs[0].Text) + patches[0].diffs[0].Text = nullPadding[len(patches[0].diffs[0].Text):] + patches[0].diffs[0].Text + patches[0].start1 -= extraLength + patches[0].start2 -= extraLength + patches[0].length1 += extraLength + patches[0].length2 += extraLength + } + + // Add some padding on end of last diff. + last := len(patches) - 1 + if len(patches[last].diffs) == 0 || patches[last].diffs[len(patches[last].diffs)-1].Type != DiffEqual { + // Add nullPadding equality. + patches[last].diffs = append(patches[last].diffs, Diff{DiffEqual, nullPadding}) + patches[last].length1 += paddingLength + patches[last].length2 += paddingLength + } else if paddingLength > len(patches[last].diffs[len(patches[last].diffs)-1].Text) { + // Grow last equality. 
+ lastDiff := patches[last].diffs[len(patches[last].diffs)-1] + extraLength := paddingLength - len(lastDiff.Text) + patches[last].diffs[len(patches[last].diffs)-1].Text += nullPadding[:extraLength] + patches[last].length1 += extraLength + patches[last].length2 += extraLength + } + + return nullPadding +} + +// PatchSplitMax looks through the patches and breaks up any which are longer than the maximum limit of the match algorithm. +// Intended to be called only from within patchApply. +func (dmp *DiffMatchPatch) PatchSplitMax(patches []Patch) []Patch { + patchSize := dmp.MatchMaxBits + for x := 0; x < len(patches); x++ { + if patches[x].length1 <= patchSize { + continue + } + bigpatch := patches[x] + // Remove the big old patch. + patches = append(patches[:x], patches[x+1:]...) + x-- + + start1 := bigpatch.start1 + start2 := bigpatch.start2 + precontext := "" + for len(bigpatch.diffs) != 0 { + // Create one of several smaller patches. + patch := Patch{} + empty := true + patch.start1 = start1 - len(precontext) + patch.start2 = start2 - len(precontext) + if len(precontext) != 0 { + patch.length1 = len(precontext) + patch.length2 = len(precontext) + patch.diffs = append(patch.diffs, Diff{DiffEqual, precontext}) + } + for len(bigpatch.diffs) != 0 && patch.length1 < patchSize-dmp.PatchMargin { + diffType := bigpatch.diffs[0].Type + diffText := bigpatch.diffs[0].Text + if diffType == DiffInsert { + // Insertions are harmless. + patch.length2 += len(diffText) + start2 += len(diffText) + patch.diffs = append(patch.diffs, bigpatch.diffs[0]) + bigpatch.diffs = bigpatch.diffs[1:] + empty = false + } else if diffType == DiffDelete && len(patch.diffs) == 1 && patch.diffs[0].Type == DiffEqual && len(diffText) > 2*patchSize { + // This is a large deletion. Let it pass in one chunk. + patch.length1 += len(diffText) + start1 += len(diffText) + empty = false + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + bigpatch.diffs = bigpatch.diffs[1:] + } else { + // Deletion or equality. Only take as much as we can stomach. + diffText = diffText[:min(len(diffText), patchSize-patch.length1-dmp.PatchMargin)] + + patch.length1 += len(diffText) + start1 += len(diffText) + if diffType == DiffEqual { + patch.length2 += len(diffText) + start2 += len(diffText) + } else { + empty = false + } + patch.diffs = append(patch.diffs, Diff{diffType, diffText}) + if diffText == bigpatch.diffs[0].Text { + bigpatch.diffs = bigpatch.diffs[1:] + } else { + bigpatch.diffs[0].Text = + bigpatch.diffs[0].Text[len(diffText):] + } + } + } + // Compute the head context for the next patch. + precontext = dmp.DiffText2(patch.diffs) + precontext = precontext[max(0, len(precontext)-dmp.PatchMargin):] + + postcontext := "" + // Append the end context for this patch. + if len(dmp.DiffText1(bigpatch.diffs)) > dmp.PatchMargin { + postcontext = dmp.DiffText1(bigpatch.diffs)[:dmp.PatchMargin] + } else { + postcontext = dmp.DiffText1(bigpatch.diffs) + } + + if len(postcontext) != 0 { + patch.length1 += len(postcontext) + patch.length2 += len(postcontext) + if len(patch.diffs) != 0 && patch.diffs[len(patch.diffs)-1].Type == DiffEqual { + patch.diffs[len(patch.diffs)-1].Text += postcontext + } else { + patch.diffs = append(patch.diffs, Diff{DiffEqual, postcontext}) + } + } + if !empty { + x++ + patches = append(patches[:x], append([]Patch{patch}, patches[x:]...)...) + } + } + } + return patches +} + +// PatchToText takes a list of patches and returns a textual representation. 
+func (dmp *DiffMatchPatch) PatchToText(patches []Patch) string { + var text bytes.Buffer + for _, aPatch := range patches { + _, _ = text.WriteString(aPatch.String()) + } + return text.String() +} + +// PatchFromText parses a textual representation of patches and returns a List of Patch objects. +func (dmp *DiffMatchPatch) PatchFromText(textline string) ([]Patch, error) { + patches := []Patch{} + if len(textline) == 0 { + return patches, nil + } + text := strings.Split(textline, "\n") + textPointer := 0 + patchHeader := regexp.MustCompile("^@@ -(\\d+),?(\\d*) \\+(\\d+),?(\\d*) @@$") + + var patch Patch + var sign uint8 + var line string + for textPointer < len(text) { + + if !patchHeader.MatchString(text[textPointer]) { + return patches, errors.New("Invalid patch string: " + text[textPointer]) + } + + patch = Patch{} + m := patchHeader.FindStringSubmatch(text[textPointer]) + + patch.start1, _ = strconv.Atoi(m[1]) + if len(m[2]) == 0 { + patch.start1-- + patch.length1 = 1 + } else if m[2] == "0" { + patch.length1 = 0 + } else { + patch.start1-- + patch.length1, _ = strconv.Atoi(m[2]) + } + + patch.start2, _ = strconv.Atoi(m[3]) + + if len(m[4]) == 0 { + patch.start2-- + patch.length2 = 1 + } else if m[4] == "0" { + patch.length2 = 0 + } else { + patch.start2-- + patch.length2, _ = strconv.Atoi(m[4]) + } + textPointer++ + + for textPointer < len(text) { + if len(text[textPointer]) > 0 { + sign = text[textPointer][0] + } else { + textPointer++ + continue + } + + line = text[textPointer][1:] + line = strings.Replace(line, "+", "%2b", -1) + line, _ = url.QueryUnescape(line) + if sign == '-' { + // Deletion. + patch.diffs = append(patch.diffs, Diff{DiffDelete, line}) + } else if sign == '+' { + // Insertion. + patch.diffs = append(patch.diffs, Diff{DiffInsert, line}) + } else if sign == ' ' { + // Minor equality. + patch.diffs = append(patch.diffs, Diff{DiffEqual, line}) + } else if sign == '@' { + // Start of next patch. + break + } else { + // WTF? + return patches, errors.New("Invalid patch mode '" + string(sign) + "' in: " + string(line)) + } + textPointer++ + } + + patches = append(patches, patch) + } + return patches, nil +} diff --git a/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go new file mode 100644 index 00000000..265f29cc --- /dev/null +++ b/vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go @@ -0,0 +1,88 @@ +// Copyright (c) 2012-2016 The go-diff authors. All rights reserved. +// https://github.com/sergi/go-diff +// See the included LICENSE file for license details. +// +// go-diff is a Go implementation of Google's Diff, Match, and Patch library +// Original library is Copyright (c) 2006 Google Inc. +// http://code.google.com/p/google-diff-match-patch/ + +package diffmatchpatch + +import ( + "strings" + "unicode/utf8" +) + +// unescaper unescapes selected chars for compatibility with JavaScript's encodeURI. +// In speed critical applications this could be dropped since the receiving application will certainly decode these fine. Note that this function is case-sensitive. Thus "%3F" would not be unescaped. But this is ok because it is only called with the output of HttpUtility.UrlEncode which returns lowercase hex. Example: "%3f" -> "?", "%24" -> "$", etc. 
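
Taken together, the patch functions above form a small produce/serialize/apply pipeline. The following sketch is illustrative (the texts are invented) and uses only functions vendored earlier in this patch:

```go
package main

import (
	"fmt"

	"github.com/sergi/go-diff/diffmatchpatch"
)

func main() {
	dmp := diffmatchpatch.New()

	original := "I am the very model of a modern major general."
	edited := "I am the very model of a cartoon individual."

	// Build patches and serialize them in the GNU-diff-like form produced by Patch.String.
	patches := dmp.PatchMake(original, edited)
	asText := dmp.PatchToText(patches)
	fmt.Println(asText)

	// The text round-trips through PatchFromText and can be applied to a slightly
	// different base; the bool slice reports which patches applied cleanly.
	parsed, err := dmp.PatchFromText(asText)
	if err != nil {
		panic(err)
	}
	result, applied := dmp.PatchApply(parsed, "I am the very model of a modern major general!")
	fmt.Println(result, applied)
}
```
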
+var unescaper = strings.NewReplacer( + "%21", "!", "%7E", "~", "%27", "'", + "%28", "(", "%29", ")", "%3B", ";", + "%2F", "/", "%3F", "?", "%3A", ":", + "%40", "@", "%26", "&", "%3D", "=", + "%2B", "+", "%24", "$", "%2C", ",", "%23", "#", "%2A", "*") + +// indexOf returns the first index of pattern in str, starting at str[i]. +func indexOf(str string, pattern string, i int) int { + if i > len(str)-1 { + return -1 + } + if i <= 0 { + return strings.Index(str, pattern) + } + ind := strings.Index(str[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +// lastIndexOf returns the last index of pattern in str, starting at str[i]. +func lastIndexOf(str string, pattern string, i int) int { + if i < 0 { + return -1 + } + if i >= len(str) { + return strings.LastIndex(str, pattern) + } + _, size := utf8.DecodeRuneInString(str[i:]) + return strings.LastIndex(str[:i+size], pattern) +} + +// runesIndexOf returns the index of pattern in target, starting at target[i]. +func runesIndexOf(target, pattern []rune, i int) int { + if i > len(target)-1 { + return -1 + } + if i <= 0 { + return runesIndex(target, pattern) + } + ind := runesIndex(target[i:], pattern) + if ind == -1 { + return -1 + } + return ind + i +} + +func runesEqual(r1, r2 []rune) bool { + if len(r1) != len(r2) { + return false + } + for i, c := range r1 { + if c != r2[i] { + return false + } + } + return true +} + +// runesIndex is the equivalent of strings.Index for rune slices. +func runesIndex(r1, r2 []rune) int { + last := len(r1) - len(r2) + for i := 0; i <= last; i++ { + if runesEqual(r1[i:i+len(r2)], r2) { + return i + } + } + return -1 +} diff --git a/vendor/github.com/yudai/gojsondiff/LICENSE b/vendor/github.com/yudai/gojsondiff/LICENSE new file mode 100644 index 00000000..445f43cd --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/LICENSE @@ -0,0 +1,145 @@ +The MIT License (MIT) + +Copyright (c) 2015 Iwasaki Yudai + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +============================================================================ + +This repository is build with following third party libraries. Thank you! 
+ +## go-diff - https://github.com/sergi/go-diff + +Copyright (c) 2012 Sergi Mansilla <sergi.mansilla@gmail.com> + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. + + +## golcs - https://github.com/yudai/golcs + +The MIT License (MIT) + +Copyright (c) 2015 Iwasaki Yudai + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +## cli.go - https://github.com/urfave/cli + +Copyright (C) 2013 Jeremy Saenz +All Rights Reserved. + +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +## ginkgo - https://github.com/onsi/ginkgo + +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +# gomega - https://github.com/onsi/gomega + +Copyright (c) 2013-2014 Onsi Fakhouri + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/yudai/gojsondiff/Makefile b/vendor/github.com/yudai/gojsondiff/Makefile new file mode 100644 index 00000000..50ed3d60 --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/Makefile @@ -0,0 +1,2 @@ +test: + if [ `go fmt $(go list ./... | grep -v /vendor/) | wc -l` -gt 0 ]; then echo "go fmt error"; exit 1; fi diff --git a/vendor/github.com/yudai/gojsondiff/README.md b/vendor/github.com/yudai/gojsondiff/README.md new file mode 100644 index 00000000..2f0f6f8c --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/README.md @@ -0,0 +1,157 @@ +# Go JSON Diff (and Patch) + +[][wercker] +[][godoc] +[][license] + +[wercker]: https://app.wercker.com/project/bykey/00d70daaf40ce277fd4f10290f097b9d +[godoc]: https://godoc.org/github.com/yudai/gojsondiff +[license]: https://github.com/yudai/gojsondiff/blob/master/LICENSE + +## How to use + +### Installation + +```sh +go get github.com/yudai/gojsondiff +``` + +### Comparing two JSON strings + +See `jd/main.go` for how to use this library. + + +## CLI tool + +This repository contains a package that you can use as a CLI tool. 
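
For the library-level use referred to above ("Comparing two JSON strings"), a minimal sketch, with invented JSON inputs, looks like the following; it relies only on the `Differ`, `Diff`, and formatter types vendored in this patch.

```go
package main

import (
	"encoding/json"
	"fmt"

	diff "github.com/yudai/gojsondiff"
	"github.com/yudai/gojsondiff/formatter"
)

func main() {
	left := []byte(`{"name": "one", "tags": ["a", "b"]}`)
	right := []byte(`{"name": "two", "tags": ["a", "b", "c"]}`)

	// Compare unmarshals both documents and returns a Diff holding Deltas.
	d, err := diff.New().Compare(left, right)
	if err != nil {
		panic(err)
	}
	if !d.Modified() {
		fmt.Println("no differences")
		return
	}

	// The ASCII formatter needs the left-hand document to print unchanged context.
	var leftObj map[string]interface{}
	if err := json.Unmarshal(left, &leftObj); err != nil {
		panic(err)
	}
	out, err := formatter.NewAsciiFormatter(leftObj, formatter.AsciiFormatterConfig{
		ShowArrayIndex: true,
	}).Format(d)
	if err != nil {
		panic(err)
	}
	fmt.Print(out)
}
```

The `DeltaFormatter` added later in this patch renders the same `Diff` in the jsondiffpatch delta format shown below, and `Differ.ApplyPatch` applies a `Diff` back onto the left-hand object in place.
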
+ +### Installation + +```sh +go get github.com/yudai/gojsondiff/jd +``` + +### Usage + +#### Diff + +Just give two json files to the `jd` command: + +```sh +jd one.json another.json +``` + +Outputs would be something like: + +```diff + { + "arr": [ + 0: "arr0", + 1: 21, + 2: { + "num": 1, +- "str": "pek3f" ++ "str": "changed" + }, + 3: [ + 0: 0, +- 1: "1" ++ 1: "changed" + ] + ], + "bool": true, + "num_float": 39.39, + "num_int": 13, + "obj": { + "arr": [ + 0: 17, + 1: "str", + 2: { +- "str": "eafeb" ++ "str": "changed" + } + ], ++ "new": "added", +- "num": 19, + "obj": { +- "num": 14, ++ "num": 9999 +- "str": "efj3" ++ "str": "changed" + }, + "str": "bcded" + }, + "str": "abcde" + } +``` + +When you prefer the delta foramt of [jsondiffpatch](https://github.com/benjamine/jsondiffpatch), add the `-f delta` option. + +```sh +jd -f delta one.json another.json +``` + +This command shows: + +```json +{ + "arr": { + "2": { + "str": [ + "pek3f", + "changed" + ] + }, + "3": { + "1": [ + "1", + "changed" + ], + "_t": "a" + }, + "_t": "a" + }, + "obj": { + "arr": { + "2": { + "str": [ + "eafeb", + "changed" + ] + }, + "_t": "a" + }, + "new": [ + "added" + ], + "num": [ + 19, + 0, + 0 + ], + "obj": { + "num": [ + 14, + 9999 + ], + "str": [ + "efj3", + "changed" + ] + } + } +} +``` + +#### Patch + +Give a diff file in the delta format and the JSON file to the `jp` command. + +```sh +jp diff.delta one.json +``` + + +## License + +MIT License (see `LICENSE` for detail) diff --git a/vendor/github.com/yudai/gojsondiff/deltas.go b/vendor/github.com/yudai/gojsondiff/deltas.go new file mode 100644 index 00000000..403c5bf4 --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/deltas.go @@ -0,0 +1,461 @@ +package gojsondiff + +import ( + "errors" + dmp "github.com/sergi/go-diff/diffmatchpatch" + "reflect" + "strconv" +) + +// A Delta represents an atomic difference between two JSON objects. +type Delta interface { + // Similarity calculates the similarity of the Delta values. + // The return value is normalized from 0 to 1, + // 0 is completely different and 1 is they are same + Similarity() (similarity float64) +} + +// To cache the calculated similarity, +// concrete Deltas can use similariter and similarityCache +type similariter interface { + similarity() (similarity float64) +} + +type similarityCache struct { + similariter + value float64 +} + +func newSimilarityCache(sim similariter) similarityCache { + cache := similarityCache{similariter: sim, value: -1} + return cache +} + +func (cache similarityCache) Similarity() (similarity float64) { + if cache.value < 0 { + cache.value = cache.similariter.similarity() + } + return cache.value +} + +// A Position represents the position of a Delta in an object or an array. +type Position interface { + // String returns the position as a string + String() (name string) + + // CompareTo returns a true if the Position is smaller than another Position. + // This function is used to sort Positions by the sort package. + CompareTo(another Position) bool +} + +// A Name is a Postition with a string, which means the delta is in an object. +type Name string + +func (n Name) String() (name string) { + return string(n) +} + +func (n Name) CompareTo(another Position) bool { + return n < another.(Name) +} + +// A Index is a Position with an int value, which means the Delta is in an Array. 
+type Index int + +func (i Index) String() (name string) { + return strconv.Itoa(int(i)) +} + +func (i Index) CompareTo(another Position) bool { + return i < another.(Index) +} + +// A PreDelta is a Delta that has a position of the left side JSON object. +// Deltas implements this interface should be applies before PostDeltas. +type PreDelta interface { + // PrePosition returns the Position. + PrePosition() Position + + // PreApply applies the delta to object. + PreApply(object interface{}) interface{} +} + +type preDelta struct{ Position } + +func (i preDelta) PrePosition() Position { + return Position(i.Position) +} + +type preDeltas []PreDelta + +// for sorting +func (s preDeltas) Len() int { + return len(s) +} + +// for sorting +func (s preDeltas) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// for sorting +func (s preDeltas) Less(i, j int) bool { + return !s[i].PrePosition().CompareTo(s[j].PrePosition()) +} + +// A PreDelta is a Delta that has a position of the right side JSON object. +// Deltas implements this interface should be applies after PreDeltas. +type PostDelta interface { + // PostPosition returns the Position. + PostPosition() Position + + // PostApply applies the delta to object. + PostApply(object interface{}) interface{} +} + +type postDelta struct{ Position } + +func (i postDelta) PostPosition() Position { + return Position(i.Position) +} + +type postDeltas []PostDelta + +// for sorting +func (s postDeltas) Len() int { + return len(s) +} + +// for sorting +func (s postDeltas) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// for sorting +func (s postDeltas) Less(i, j int) bool { + return s[i].PostPosition().CompareTo(s[j].PostPosition()) +} + +// An Object is a Delta that represents an object of JSON +type Object struct { + postDelta + similarityCache + + // Deltas holds internal Deltas + Deltas []Delta +} + +// NewObject returns an Object +func NewObject(position Position, deltas []Delta) *Object { + d := Object{postDelta: postDelta{position}, Deltas: deltas} + d.similarityCache = newSimilarityCache(&d) + return &d +} + +func (d *Object) PostApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + o := object.(map[string]interface{}) + n := string(d.PostPosition().(Name)) + o[n] = applyDeltas(d.Deltas, o[n]) + case []interface{}: + o := object.([]interface{}) + n := int(d.PostPosition().(Index)) + o[n] = applyDeltas(d.Deltas, o[n]) + } + return object +} + +func (d *Object) similarity() (similarity float64) { + similarity = deltasSimilarity(d.Deltas) + return +} + +// An Array is a Delta that represents an array of JSON +type Array struct { + postDelta + similarityCache + + // Deltas holds internal Deltas + Deltas []Delta +} + +// NewArray returns an Array +func NewArray(position Position, deltas []Delta) *Array { + d := Array{postDelta: postDelta{position}, Deltas: deltas} + d.similarityCache = newSimilarityCache(&d) + return &d +} + +func (d *Array) PostApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + o := object.(map[string]interface{}) + n := string(d.PostPosition().(Name)) + o[n] = applyDeltas(d.Deltas, o[n]) + case []interface{}: + o := object.([]interface{}) + n := int(d.PostPosition().(Index)) + o[n] = applyDeltas(d.Deltas, o[n]) + } + return object +} + +func (d *Array) similarity() (similarity float64) { + similarity = deltasSimilarity(d.Deltas) + return +} + +// An Added represents a new added field of an object or an array +type Added struct { + postDelta + 
similarityCache + + // Values holds the added value + Value interface{} +} + +// NewAdded returns a new Added +func NewAdded(position Position, value interface{}) *Added { + d := Added{postDelta: postDelta{position}, Value: value} + return &d +} + +func (d *Added) PostApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + object.(map[string]interface{})[string(d.PostPosition().(Name))] = d.Value + case []interface{}: + i := int(d.PostPosition().(Index)) + o := object.([]interface{}) + if i < len(o) { + o = append(o, 0) //dummy + copy(o[i+1:], o[i:]) + o[i] = d.Value + object = o + } else { + object = append(o, d.Value) + } + } + + return object +} + +func (d *Added) similarity() (similarity float64) { + return 0 +} + +// A Modified represents a field whose value is changed. +type Modified struct { + postDelta + similarityCache + + // The value before modification + OldValue interface{} + + // The value after modification + NewValue interface{} +} + +// NewModified returns a Modified +func NewModified(position Position, oldValue, newValue interface{}) *Modified { + d := Modified{ + postDelta: postDelta{position}, + OldValue: oldValue, + NewValue: newValue, + } + d.similarityCache = newSimilarityCache(&d) + return &d + +} + +func (d *Modified) PostApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + // TODO check old value + object.(map[string]interface{})[string(d.PostPosition().(Name))] = d.NewValue + case []interface{}: + object.([]interface{})[int(d.PostPosition().(Index))] = d.NewValue + } + return object +} + +func (d *Modified) similarity() (similarity float64) { + similarity += 0.3 // at least, they are at the same position + if reflect.TypeOf(d.OldValue) == reflect.TypeOf(d.NewValue) { + similarity += 0.3 // types are same + + switch d.OldValue.(type) { + case string: + similarity += 0.4 * stringSimilarity(d.OldValue.(string), d.NewValue.(string)) + case float64: + ratio := d.OldValue.(float64) / d.NewValue.(float64) + if ratio > 1 { + ratio = 1 / ratio + } + similarity += 0.4 * ratio + } + } + return +} + +// A TextDiff represents a Modified with TextDiff between the old and the new values. +type TextDiff struct { + Modified + + // Diff string + Diff []dmp.Patch +} + +// NewTextDiff returns +func NewTextDiff(position Position, diff []dmp.Patch, oldValue, newValue interface{}) *TextDiff { + d := TextDiff{ + Modified: *NewModified(position, oldValue, newValue), + Diff: diff, + } + return &d +} + +func (d *TextDiff) PostApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + o := object.(map[string]interface{}) + i := string(d.PostPosition().(Name)) + // TODO error + d.OldValue = o[i] + // TODO error + d.patch() + o[i] = d.NewValue + case []interface{}: + o := object.([]interface{}) + i := d.PostPosition().(Index) + d.OldValue = o[i] + // TODO error + d.patch() + o[i] = d.NewValue + } + return object +} + +func (d *TextDiff) patch() error { + if d.OldValue == nil { + return errors.New("Old Value is not set") + } + patcher := dmp.New() + patched, successes := patcher.PatchApply(d.Diff, d.OldValue.(string)) + for _, success := range successes { + if !success { + return errors.New("Failed to apply a patch") + } + } + d.NewValue = patched + return nil +} + +func (d *TextDiff) DiffString() string { + dmp := dmp.New() + return dmp.PatchToText(d.Diff) +} + +// A Delted represents deleted field or index of an Object or an Array. 
+type Deleted struct { + preDelta + + // The value deleted + Value interface{} +} + +// NewDeleted returns a Deleted +func NewDeleted(position Position, value interface{}) *Deleted { + d := Deleted{ + preDelta: preDelta{position}, + Value: value, + } + return &d + +} + +func (d *Deleted) PreApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + // TODO check old value + delete(object.(map[string]interface{}), string(d.PrePosition().(Name))) + case []interface{}: + i := int(d.PrePosition().(Index)) + o := object.([]interface{}) + object = append(o[:i], o[i+1:]...) + } + return object +} + +func (d Deleted) Similarity() (similarity float64) { + return 0 +} + +// A Moved represents field that is moved, which means the index or name is +// changed. Note that, in this library, assigning a Moved and a Modified to +// a single position is not allowed. For the compatibility with jsondiffpatch, +// the Moved in this library can hold the old and new value in it. +type Moved struct { + preDelta + postDelta + similarityCache + // The value before moving + Value interface{} + // The delta applied after moving (for compatibility) + Delta interface{} +} + +func NewMoved(oldPosition Position, newPosition Position, value interface{}, delta Delta) *Moved { + d := Moved{ + preDelta: preDelta{oldPosition}, + postDelta: postDelta{newPosition}, + Value: value, + Delta: delta, + } + d.similarityCache = newSimilarityCache(&d) + return &d +} + +func (d *Moved) PreApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + //not supported + case []interface{}: + i := int(d.PrePosition().(Index)) + o := object.([]interface{}) + d.Value = o[i] + object = append(o[:i], o[i+1:]...) + } + return object +} + +func (d *Moved) PostApply(object interface{}) interface{} { + switch object.(type) { + case map[string]interface{}: + //not supported + case []interface{}: + i := int(d.PostPosition().(Index)) + o := object.([]interface{}) + o = append(o, 0) //dummy + copy(o[i+1:], o[i:]) + o[i] = d.Value + object = o + } + + if d.Delta != nil { + d.Delta.(PostDelta).PostApply(object) + } + + return object +} + +func (d *Moved) similarity() (similarity float64) { + similarity = 0.6 // as type and contens are same + ratio := float64(d.PrePosition().(Index)) / float64(d.PostPosition().(Index)) + if ratio > 1 { + ratio = 1 / ratio + } + similarity += 0.4 * ratio + return +} diff --git a/vendor/github.com/yudai/gojsondiff/formatter/ascii.go b/vendor/github.com/yudai/gojsondiff/formatter/ascii.go new file mode 100644 index 00000000..b3078132 --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/formatter/ascii.go @@ -0,0 +1,370 @@ +package formatter + +import ( + "bytes" + "errors" + "fmt" + "sort" + + diff "github.com/yudai/gojsondiff" +) + +func NewAsciiFormatter(left interface{}, config AsciiFormatterConfig) *AsciiFormatter { + return &AsciiFormatter{ + left: left, + config: config, + } +} + +type AsciiFormatter struct { + left interface{} + config AsciiFormatterConfig + buffer *bytes.Buffer + path []string + size []int + inArray []bool + line *AsciiLine +} + +type AsciiFormatterConfig struct { + ShowArrayIndex bool + Coloring bool +} + +var AsciiFormatterDefaultConfig = AsciiFormatterConfig{} + +type AsciiLine struct { + marker string + indent int + buffer *bytes.Buffer +} + +func (f *AsciiFormatter) Format(diff diff.Diff) (result string, err error) { + f.buffer = bytes.NewBuffer([]byte{}) + f.path = []string{} + f.size = []int{} + f.inArray = []bool{} 
+ + if v, ok := f.left.(map[string]interface{}); ok { + f.formatObject(v, diff) + } else if v, ok := f.left.([]interface{}); ok { + f.formatArray(v, diff) + } else { + return "", fmt.Errorf("expected map[string]interface{} or []interface{}, got %T", + f.left) + } + + return f.buffer.String(), nil +} + +func (f *AsciiFormatter) formatObject(left map[string]interface{}, df diff.Diff) { + f.addLineWith(AsciiSame, "{") + f.push("ROOT", len(left), false) + f.processObject(left, df.Deltas()) + f.pop() + f.addLineWith(AsciiSame, "}") +} + +func (f *AsciiFormatter) formatArray(left []interface{}, df diff.Diff) { + f.addLineWith(AsciiSame, "[") + f.push("ROOT", len(left), true) + f.processArray(left, df.Deltas()) + f.pop() + f.addLineWith(AsciiSame, "]") +} + +func (f *AsciiFormatter) processArray(array []interface{}, deltas []diff.Delta) error { + patchedIndex := 0 + for index, value := range array { + f.processItem(value, deltas, diff.Index(index)) + patchedIndex++ + } + + // additional Added + for _, delta := range deltas { + switch delta.(type) { + case *diff.Added: + d := delta.(*diff.Added) + // skip items already processed + if int(d.Position.(diff.Index)) < len(array) { + continue + } + f.printRecursive(d.Position.String(), d.Value, AsciiAdded) + } + } + + return nil +} + +func (f *AsciiFormatter) processObject(object map[string]interface{}, deltas []diff.Delta) error { + names := sortedKeys(object) + for _, name := range names { + value := object[name] + f.processItem(value, deltas, diff.Name(name)) + } + + // Added + for _, delta := range deltas { + switch delta.(type) { + case *diff.Added: + d := delta.(*diff.Added) + f.printRecursive(d.Position.String(), d.Value, AsciiAdded) + } + } + + return nil +} + +func (f *AsciiFormatter) processItem(value interface{}, deltas []diff.Delta, position diff.Position) error { + matchedDeltas := f.searchDeltas(deltas, position) + positionStr := position.String() + if len(matchedDeltas) > 0 { + for _, matchedDelta := range matchedDeltas { + + switch matchedDelta.(type) { + case *diff.Object: + d := matchedDelta.(*diff.Object) + switch value.(type) { + case map[string]interface{}: + //ok + default: + return errors.New("Type mismatch") + } + o := value.(map[string]interface{}) + + f.newLine(AsciiSame) + f.printKey(positionStr) + f.print("{") + f.closeLine() + f.push(positionStr, len(o), false) + f.processObject(o, d.Deltas) + f.pop() + f.newLine(AsciiSame) + f.print("}") + f.printComma() + f.closeLine() + + case *diff.Array: + d := matchedDelta.(*diff.Array) + switch value.(type) { + case []interface{}: + //ok + default: + return errors.New("Type mismatch") + } + a := value.([]interface{}) + + f.newLine(AsciiSame) + f.printKey(positionStr) + f.print("[") + f.closeLine() + f.push(positionStr, len(a), true) + f.processArray(a, d.Deltas) + f.pop() + f.newLine(AsciiSame) + f.print("]") + f.printComma() + f.closeLine() + + case *diff.Added: + d := matchedDelta.(*diff.Added) + f.printRecursive(positionStr, d.Value, AsciiAdded) + f.size[len(f.size)-1]++ + + case *diff.Modified: + d := matchedDelta.(*diff.Modified) + savedSize := f.size[len(f.size)-1] + f.printRecursive(positionStr, d.OldValue, AsciiDeleted) + f.size[len(f.size)-1] = savedSize + f.printRecursive(positionStr, d.NewValue, AsciiAdded) + + case *diff.TextDiff: + savedSize := f.size[len(f.size)-1] + d := matchedDelta.(*diff.TextDiff) + f.printRecursive(positionStr, d.OldValue, AsciiDeleted) + f.size[len(f.size)-1] = savedSize + f.printRecursive(positionStr, d.NewValue, AsciiAdded) + + case 
*diff.Deleted: + d := matchedDelta.(*diff.Deleted) + f.printRecursive(positionStr, d.Value, AsciiDeleted) + + default: + return errors.New("Unknown Delta type detected") + } + + } + } else { + f.printRecursive(positionStr, value, AsciiSame) + } + + return nil +} + +func (f *AsciiFormatter) searchDeltas(deltas []diff.Delta, postion diff.Position) (results []diff.Delta) { + results = make([]diff.Delta, 0) + for _, delta := range deltas { + switch delta.(type) { + case diff.PostDelta: + if delta.(diff.PostDelta).PostPosition() == postion { + results = append(results, delta) + } + case diff.PreDelta: + if delta.(diff.PreDelta).PrePosition() == postion { + results = append(results, delta) + } + default: + panic("heh") + } + } + return +} + +const ( + AsciiSame = " " + AsciiAdded = "+" + AsciiDeleted = "-" +) + +var AsciiStyles = map[string]string{ + AsciiAdded: "30;42", + AsciiDeleted: "30;41", +} + +func (f *AsciiFormatter) push(name string, size int, array bool) { + f.path = append(f.path, name) + f.size = append(f.size, size) + f.inArray = append(f.inArray, array) +} + +func (f *AsciiFormatter) pop() { + f.path = f.path[0 : len(f.path)-1] + f.size = f.size[0 : len(f.size)-1] + f.inArray = f.inArray[0 : len(f.inArray)-1] +} + +func (f *AsciiFormatter) addLineWith(marker string, value string) { + f.line = &AsciiLine{ + marker: marker, + indent: len(f.path), + buffer: bytes.NewBufferString(value), + } + f.closeLine() +} + +func (f *AsciiFormatter) newLine(marker string) { + f.line = &AsciiLine{ + marker: marker, + indent: len(f.path), + buffer: bytes.NewBuffer([]byte{}), + } +} + +func (f *AsciiFormatter) closeLine() { + style, ok := AsciiStyles[f.line.marker] + if f.config.Coloring && ok { + f.buffer.WriteString("\x1b[" + style + "m") + } + + f.buffer.WriteString(f.line.marker) + for n := 0; n < f.line.indent; n++ { + f.buffer.WriteString(" ") + } + f.buffer.Write(f.line.buffer.Bytes()) + + if f.config.Coloring && ok { + f.buffer.WriteString("\x1b[0m") + } + + f.buffer.WriteRune('\n') +} + +func (f *AsciiFormatter) printKey(name string) { + if !f.inArray[len(f.inArray)-1] { + fmt.Fprintf(f.line.buffer, `"%s": `, name) + } else if f.config.ShowArrayIndex { + fmt.Fprintf(f.line.buffer, `%s: `, name) + } +} + +func (f *AsciiFormatter) printComma() { + f.size[len(f.size)-1]-- + if f.size[len(f.size)-1] > 0 { + f.line.buffer.WriteRune(',') + } +} + +func (f *AsciiFormatter) printValue(value interface{}) { + switch value.(type) { + case string: + fmt.Fprintf(f.line.buffer, `"%s"`, value) + case nil: + f.line.buffer.WriteString("null") + default: + fmt.Fprintf(f.line.buffer, `%#v`, value) + } +} + +func (f *AsciiFormatter) print(a string) { + f.line.buffer.WriteString(a) +} + +func (f *AsciiFormatter) printRecursive(name string, value interface{}, marker string) { + switch value.(type) { + case map[string]interface{}: + f.newLine(marker) + f.printKey(name) + f.print("{") + f.closeLine() + + m := value.(map[string]interface{}) + size := len(m) + f.push(name, size, false) + + keys := sortedKeys(m) + for _, key := range keys { + f.printRecursive(key, m[key], marker) + } + f.pop() + + f.newLine(marker) + f.print("}") + f.printComma() + f.closeLine() + + case []interface{}: + f.newLine(marker) + f.printKey(name) + f.print("[") + f.closeLine() + + s := value.([]interface{}) + size := len(s) + f.push("", size, true) + for _, item := range s { + f.printRecursive("", item, marker) + } + f.pop() + + f.newLine(marker) + f.print("]") + f.printComma() + f.closeLine() + + default: + f.newLine(marker) + 
f.printKey(name) + f.printValue(value) + f.printComma() + f.closeLine() + } +} + +func sortedKeys(m map[string]interface{}) (keys []string) { + keys = make([]string, 0, len(m)) + for key, _ := range m { + keys = append(keys, key) + } + sort.Strings(keys) + return +} diff --git a/vendor/github.com/yudai/gojsondiff/formatter/delta.go b/vendor/github.com/yudai/gojsondiff/formatter/delta.go new file mode 100644 index 00000000..f7ccefda --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/formatter/delta.go @@ -0,0 +1,124 @@ +package formatter + +import ( + "encoding/json" + "errors" + "fmt" + + diff "github.com/yudai/gojsondiff" +) + +const ( + DeltaDelete = 0 + DeltaTextDiff = 2 + DeltaMove = 3 +) + +func NewDeltaFormatter() *DeltaFormatter { + return &DeltaFormatter{ + PrintIndent: true, + } +} + +type DeltaFormatter struct { + PrintIndent bool +} + +func (f *DeltaFormatter) Format(diff diff.Diff) (result string, err error) { + jsonObject, err := f.formatObject(diff.Deltas()) + if err != nil { + return "", err + } + var resultBytes []byte + if f.PrintIndent { + resultBytes, err = json.MarshalIndent(jsonObject, "", " ") + } else { + resultBytes, err = json.Marshal(jsonObject) + } + if err != nil { + return "", err + } + + return string(resultBytes) + "\n", nil +} + +func (f *DeltaFormatter) FormatAsJson(diff diff.Diff) (json map[string]interface{}, err error) { + return f.formatObject(diff.Deltas()) +} + +func (f *DeltaFormatter) formatObject(deltas []diff.Delta) (deltaJson map[string]interface{}, err error) { + deltaJson = map[string]interface{}{} + for _, delta := range deltas { + switch delta.(type) { + case *diff.Object: + d := delta.(*diff.Object) + deltaJson[d.Position.String()], err = f.formatObject(d.Deltas) + if err != nil { + return nil, err + } + case *diff.Array: + d := delta.(*diff.Array) + deltaJson[d.Position.String()], err = f.formatArray(d.Deltas) + if err != nil { + return nil, err + } + case *diff.Added: + d := delta.(*diff.Added) + deltaJson[d.PostPosition().String()] = []interface{}{d.Value} + case *diff.Modified: + d := delta.(*diff.Modified) + deltaJson[d.PostPosition().String()] = []interface{}{d.OldValue, d.NewValue} + case *diff.TextDiff: + d := delta.(*diff.TextDiff) + deltaJson[d.PostPosition().String()] = []interface{}{d.DiffString(), 0, DeltaTextDiff} + case *diff.Deleted: + d := delta.(*diff.Deleted) + deltaJson[d.PrePosition().String()] = []interface{}{d.Value, 0, DeltaDelete} + case *diff.Moved: + return nil, errors.New("Delta type 'Move' is not supported in objects") + default: + return nil, errors.New(fmt.Sprintf("Unknown Delta type detected: %#v", delta)) + } + } + return +} + +func (f *DeltaFormatter) formatArray(deltas []diff.Delta) (deltaJson map[string]interface{}, err error) { + deltaJson = map[string]interface{}{ + "_t": "a", + } + for _, delta := range deltas { + switch delta.(type) { + case *diff.Object: + d := delta.(*diff.Object) + deltaJson[d.Position.String()], err = f.formatObject(d.Deltas) + if err != nil { + return nil, err + } + case *diff.Array: + d := delta.(*diff.Array) + deltaJson[d.Position.String()], err = f.formatArray(d.Deltas) + if err != nil { + return nil, err + } + case *diff.Added: + d := delta.(*diff.Added) + deltaJson[d.PostPosition().String()] = []interface{}{d.Value} + case *diff.Modified: + d := delta.(*diff.Modified) + deltaJson[d.PostPosition().String()] = []interface{}{d.OldValue, d.NewValue} + case *diff.TextDiff: + d := delta.(*diff.TextDiff) + deltaJson[d.PostPosition().String()] = []interface{}{d.DiffString(), 0, 
DeltaTextDiff} + case *diff.Deleted: + d := delta.(*diff.Deleted) + deltaJson["_"+d.PrePosition().String()] = []interface{}{d.Value, 0, DeltaDelete} + case *diff.Moved: + d := delta.(*diff.Moved) + deltaJson["_"+d.PrePosition().String()] = []interface{}{"", d.PostPosition(), DeltaMove} + default: + return nil, errors.New(fmt.Sprintf("Unknown Delta type detected: %#v", delta)) + } + } + return +} diff --git a/vendor/github.com/yudai/gojsondiff/gojsondiff.go b/vendor/github.com/yudai/gojsondiff/gojsondiff.go new file mode 100644 index 00000000..26560e0f --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/gojsondiff.go @@ -0,0 +1,426 @@ +// Package gojsondiff implements "Diff" that compares two JSON objects and +// generates Deltas that describes differences between them. The package also +// provides "Patch" that apply Deltas to a JSON object. +package gojsondiff + +import ( + "container/list" + "encoding/json" + "reflect" + "sort" + + dmp "github.com/sergi/go-diff/diffmatchpatch" + "github.com/yudai/golcs" +) + +// A Diff holds deltas generated by a Differ +type Diff interface { + // Deltas returns Deltas that describe differences between two JSON objects + Deltas() []Delta + // Modified returnes true if Diff has at least one Delta. + Modified() bool +} + +type diff struct { + deltas []Delta +} + +func (diff *diff) Deltas() []Delta { + return diff.deltas +} + +func (diff *diff) Modified() bool { + return len(diff.deltas) > 0 +} + +// A Differ conmapres JSON objects and apply patches +type Differ struct { + textDiffMinimumLength int +} + +// New returns new Differ with default configuration +func New() *Differ { + return &Differ{ + textDiffMinimumLength: 30, + } +} + +// Compare compares two JSON strings as []bytes and return a Diff object. +func (differ *Differ) Compare( + left []byte, + right []byte, +) (Diff, error) { + var leftMap, rightMap map[string]interface{} + err := json.Unmarshal(left, &leftMap) + if err != nil { + return nil, err + } + + err = json.Unmarshal(right, &rightMap) + if err != nil { + return nil, err + } + return differ.CompareObjects(leftMap, rightMap), nil +} + +// CompareObjects compares two JSON object as map[string]interface{} +// and return a Diff object. +func (differ *Differ) CompareObjects( + left map[string]interface{}, + right map[string]interface{}, +) Diff { + deltas := differ.compareMaps(left, right) + return &diff{deltas: deltas} +} + +// CompareArrays compares two JSON arrays as []interface{} +// and return a Diff object. +func (differ *Differ) CompareArrays( + left []interface{}, + right []interface{}, +) Diff { + deltas := differ.compareArrays(left, right) + return &diff{deltas: deltas} +} + +func (differ *Differ) compareMaps( + left map[string]interface{}, + right map[string]interface{}, +) (deltas []Delta) { + deltas = make([]Delta, 0) + + names := sortedKeys(left) // stabilize delta order + for _, name := range names { + if rightValue, ok := right[name]; ok { + same, delta := differ.compareValues(Name(name), left[name], rightValue) + if !same { + deltas = append(deltas, delta) + } + } else { + deltas = append(deltas, NewDeleted(Name(name), left[name])) + } + } + + names = sortedKeys(right) // stabilize delta order + for _, name := range names { + if _, ok := left[name]; !ok { + deltas = append(deltas, NewAdded(Name(name), right[name])) + } + } + + return deltas +} + +// ApplyPatch applies a Diff to an JSON object. This method is destructive. 
+func (differ *Differ) ApplyPatch(json map[string]interface{}, patch Diff) { + applyDeltas(patch.Deltas(), json) +} + +type maybe struct { + index int + lcsIndex int + item interface{} +} + +func (differ *Differ) compareArrays( + left []interface{}, + right []interface{}, +) (deltas []Delta) { + deltas = make([]Delta, 0) + // LCS index pairs + lcsPairs := lcs.New(left, right).IndexPairs() + + // list up items not in LCS, they are maybe deleted + maybeDeleted := list.New() // but maybe moved or modified + lcsI := 0 + for i, leftValue := range left { + if lcsI < len(lcsPairs) && lcsPairs[lcsI].Left == i { + lcsI++ + } else { + maybeDeleted.PushBack(maybe{index: i, lcsIndex: lcsI, item: leftValue}) + } + } + + // list up items not in LCS, they are maybe Added + maybeAdded := list.New() // but maybe moved or modified + lcsI = 0 + for i, rightValue := range right { + if lcsI < len(lcsPairs) && lcsPairs[lcsI].Right == i { + lcsI++ + } else { + maybeAdded.PushBack(maybe{index: i, lcsIndex: lcsI, item: rightValue}) + } + } + + // find moved items + var delNext *list.Element // for prefetch to remove item in iteration + for delCandidate := maybeDeleted.Front(); delCandidate != nil; delCandidate = delNext { + delCan := delCandidate.Value.(maybe) + delNext = delCandidate.Next() + + for addCandidate := maybeAdded.Front(); addCandidate != nil; addCandidate = addCandidate.Next() { + addCan := addCandidate.Value.(maybe) + if reflect.DeepEqual(delCan.item, addCan.item) { + deltas = append(deltas, NewMoved(Index(delCan.index), Index(addCan.index), delCan.item, nil)) + maybeAdded.Remove(addCandidate) + maybeDeleted.Remove(delCandidate) + break + } + } + } + + // find modified or add+del + prevIndexDel := 0 + prevIndexAdd := 0 + delElement := maybeDeleted.Front() + addElement := maybeAdded.Front() + for i := 0; i <= len(lcsPairs); i++ { // not "< len(lcsPairs)" + var lcsPair lcs.IndexPair + var delSize, addSize int + if i < len(lcsPairs) { + lcsPair = lcsPairs[i] + delSize = lcsPair.Left - prevIndexDel - 1 + addSize = lcsPair.Right - prevIndexAdd - 1 + prevIndexDel = lcsPair.Left + prevIndexAdd = lcsPair.Right + } + + var delSlice []maybe + if delSize > 0 { + delSlice = make([]maybe, 0, delSize) + } else { + delSlice = make([]maybe, 0, maybeDeleted.Len()) + } + for ; delElement != nil; delElement = delElement.Next() { + d := delElement.Value.(maybe) + if d.lcsIndex != i { + break + } + delSlice = append(delSlice, d) + } + + var addSlice []maybe + if addSize > 0 { + addSlice = make([]maybe, 0, addSize) + } else { + addSlice = make([]maybe, 0, maybeAdded.Len()) + } + for ; addElement != nil; addElement = addElement.Next() { + a := addElement.Value.(maybe) + if a.lcsIndex != i { + break + } + addSlice = append(addSlice, a) + } + + if len(delSlice) > 0 && len(addSlice) > 0 { + var bestDeltas []Delta + bestDeltas, delSlice, addSlice = differ.maximizeSimilarities(delSlice, addSlice) + for _, delta := range bestDeltas { + deltas = append(deltas, delta) + } + } + + for _, del := range delSlice { + deltas = append(deltas, NewDeleted(Index(del.index), del.item)) + } + for _, add := range addSlice { + deltas = append(deltas, NewAdded(Index(add.index), add.item)) + } + } + + return deltas +} + +func (differ *Differ) compareValues( + position Position, + left interface{}, + right interface{}, +) (same bool, delta Delta) { + if reflect.TypeOf(left) != reflect.TypeOf(right) { + return false, NewModified(position, left, right) + } + + switch left.(type) { + + case map[string]interface{}: + l := left.(map[string]interface{}) 
+ childDeltas := differ.compareMaps(l, right.(map[string]interface{})) + if len(childDeltas) > 0 { + return false, NewObject(position, childDeltas) + } + + case []interface{}: + l := left.([]interface{}) + childDeltas := differ.compareArrays(l, right.([]interface{})) + + if len(childDeltas) > 0 { + return false, NewArray(position, childDeltas) + } + + default: + if !reflect.DeepEqual(left, right) { + + if reflect.ValueOf(left).Kind() == reflect.String && + reflect.ValueOf(right).Kind() == reflect.String && + differ.textDiffMinimumLength <= len(left.(string)) { + + textDiff := dmp.New() + patchs := textDiff.PatchMake(left.(string), right.(string)) + return false, NewTextDiff(position, patchs, left, right) + + } else { + return false, NewModified(position, left, right) + } + } + } + + return true, nil +} + +func applyDeltas(deltas []Delta, object interface{}) interface{} { + preDeltas := make(preDeltas, 0) + for _, delta := range deltas { + switch delta.(type) { + case PreDelta: + preDeltas = append(preDeltas, delta.(PreDelta)) + } + } + sort.Sort(preDeltas) + for _, delta := range preDeltas { + object = delta.PreApply(object) + } + + postDeltas := make(postDeltas, 0, len(deltas)-len(preDeltas)) + for _, delta := range deltas { + switch delta.(type) { + case PostDelta: + postDeltas = append(postDeltas, delta.(PostDelta)) + } + } + sort.Sort(postDeltas) + + for _, delta := range postDeltas { + object = delta.PostApply(object) + } + + return object +} + +func (differ *Differ) maximizeSimilarities(left []maybe, right []maybe) (resultDeltas []Delta, freeLeft, freeRight []maybe) { + deltaTable := make([][]Delta, len(left)) + for i := 0; i < len(left); i++ { + deltaTable[i] = make([]Delta, len(right)) + } + for i, leftValue := range left { + for j, rightValue := range right { + _, delta := differ.compareValues(Index(rightValue.index), leftValue.item, rightValue.item) + deltaTable[i][j] = delta + } + } + + sizeX := len(left) + 1 // margins for both sides + sizeY := len(right) + 1 + + // fill out with similarities + dpTable := make([][]float64, sizeX) + for i := 0; i < sizeX; i++ { + dpTable[i] = make([]float64, sizeY) + } + for x := sizeX - 2; x >= 0; x-- { + for y := sizeY - 2; y >= 0; y-- { + prevX := dpTable[x+1][y] + prevY := dpTable[x][y+1] + score := deltaTable[x][y].Similarity() + dpTable[x+1][y+1] + + dpTable[x][y] = max(prevX, prevY, score) + } + } + + minLength := len(left) + if minLength > len(right) { + minLength = len(right) + } + maxInvalidLength := minLength - 1 + + freeLeft = make([]maybe, 0, len(left)-minLength) + freeRight = make([]maybe, 0, len(right)-minLength) + + resultDeltas = make([]Delta, 0, minLength) + var x, y int + for x, y = 0, 0; x <= sizeX-2 && y <= sizeY-2; { + current := dpTable[x][y] + nextX := dpTable[x+1][y] + nextY := dpTable[x][y+1] + + xValidLength := len(left) - maxInvalidLength + y + yValidLength := len(right) - maxInvalidLength + x + + if x+1 < xValidLength && current == nextX { + freeLeft = append(freeLeft, left[x]) + x++ + } else if y+1 < yValidLength && current == nextY { + freeRight = append(freeRight, right[y]) + y++ + } else { + resultDeltas = append(resultDeltas, deltaTable[x][y]) + x++ + y++ + } + } + for ; x < sizeX-1; x++ { + freeLeft = append(freeLeft, left[x-1]) + } + for ; y < sizeY-1; y++ { + freeRight = append(freeRight, right[y-1]) + } + + return resultDeltas, freeLeft, freeRight +} + +func deltasSimilarity(deltas []Delta) (similarity float64) { + for _, delta := range deltas { + similarity += delta.Similarity() + } + similarity = 
similarity / float64(len(deltas)) + return +} + +func stringSimilarity(left, right string) (similarity float64) { + matchingLength := float64( + lcs.New( + stringToInterfaceSlice(left), + stringToInterfaceSlice(right), + ).Length(), + ) + similarity = + (matchingLength / float64(len(left))) * (matchingLength / float64(len(right))) + return +} + +func stringToInterfaceSlice(str string) []interface{} { + s := make([]interface{}, len(str)) + for i, v := range str { + s[i] = v + } + return s +} + +func sortedKeys(m map[string]interface{}) (keys []string) { + keys = make([]string, 0, len(m)) + for key, _ := range m { + keys = append(keys, key) + } + sort.Strings(keys) + return +} + +func max(first float64, rest ...float64) (max float64) { + max = first + for _, value := range rest { + if max < value { + max = value + } + } + return max +} diff --git a/vendor/github.com/yudai/gojsondiff/unmarshaler.go b/vendor/github.com/yudai/gojsondiff/unmarshaler.go new file mode 100644 index 00000000..8c26ef5b --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/unmarshaler.go @@ -0,0 +1,131 @@ +package gojsondiff + +import ( + "encoding/json" + "errors" + dmp "github.com/sergi/go-diff/diffmatchpatch" + "io" + "strconv" +) + +type Unmarshaller struct { +} + +func NewUnmarshaller() *Unmarshaller { + return &Unmarshaller{} +} + +func (um *Unmarshaller) UnmarshalBytes(diffBytes []byte) (Diff, error) { + var diffObj map[string]interface{} + json.Unmarshal(diffBytes, &diffObj) + return um.UnmarshalObject(diffObj) +} + +func (um *Unmarshaller) UnmarshalString(diffString string) (Diff, error) { + return um.UnmarshalBytes([]byte(diffString)) +} + +func (um *Unmarshaller) UnmarshalReader(diffReader io.Reader) (Diff, error) { + var diffBytes []byte + io.ReadFull(diffReader, diffBytes) + return um.UnmarshalBytes(diffBytes) +} + +func (um *Unmarshaller) UnmarshalObject(diffObj map[string]interface{}) (Diff, error) { + result, err := process(Name(""), diffObj) + if err != nil { + return nil, err + } + return &diff{deltas: result.(*Object).Deltas}, nil +} + +func process(position Position, object interface{}) (Delta, error) { + var delta Delta + switch object.(type) { + case map[string]interface{}: + o := object.(map[string]interface{}) + if isArray, typed := o["_t"]; typed && isArray == "a" { + deltas := make([]Delta, 0, len(o)) + for name, value := range o { + if name == "_t" { + continue + } + + normalizedName := name + if normalizedName[0] == '_' { + normalizedName = name[1:] + } + index, err := strconv.Atoi(normalizedName) + if err != nil { + return nil, err + } + + childDelta, err := process(Index(index), value) + if err != nil { + return nil, err + } + + deltas = append(deltas, childDelta) + } + + for _, d := range deltas { + switch d.(type) { + case *Moved: + moved := d.(*Moved) + + var dd interface{} + var i int + for i, dd = range deltas { + switch dd.(type) { + case *Moved: + case PostDelta: + pd := dd.(PostDelta) + if moved.PostPosition() == pd.PostPosition() { + moved.Delta = pd + deltas = append(deltas[:i], deltas[i+1:]...) 
+ } + } + } + } + } + + delta = NewArray(position, deltas) + } else { + deltas := make([]Delta, 0, len(o)) + for name, value := range o { + childDelta, err := process(Name(name), value) + if err != nil { + return nil, err + } + deltas = append(deltas, childDelta) + } + delta = NewObject(position, deltas) + } + case []interface{}: + o := object.([]interface{}) + switch len(o) { + case 1: + delta = NewAdded(position, o[0]) + case 2: + delta = NewModified(position, o[0], o[1]) + case 3: + switch o[2] { + case float64(0): + delta = NewDeleted(position, o[0]) + case float64(2): + dmp := dmp.New() + patches, err := dmp.PatchFromText(o[0].(string)) + if err != nil { + return nil, err + } + delta = NewTextDiff(position, patches, nil, nil) + case float64(3): + delta = NewMoved(position, Index(int(o[1].(float64))), nil, nil) + default: + return nil, errors.New("Unknown delta type") + } + } + } + + return delta, nil +} diff --git a/vendor/github.com/yudai/gojsondiff/wercker.yml b/vendor/github.com/yudai/gojsondiff/wercker.yml new file mode 100644 index 00000000..1a401f87 --- /dev/null +++ b/vendor/github.com/yudai/gojsondiff/wercker.yml @@ -0,0 +1,8 @@ +box: golang:1.6.3 + +build: + steps: + - setup-go-workspace + - script: + name: test + code: make test diff --git a/vendor/github.com/yudai/golcs/LICENSE b/vendor/github.com/yudai/golcs/LICENSE new file mode 100644 index 00000000..ab7d2e0f --- /dev/null +++ b/vendor/github.com/yudai/golcs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Iwasaki Yudai + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/yudai/golcs/README.md b/vendor/github.com/yudai/golcs/README.md new file mode 100644 index 00000000..4fe8cb3e --- /dev/null +++ b/vendor/github.com/yudai/golcs/README.md @@ -0,0 +1,60 @@ +# Go Longest Common Subsequence (LCS) + +[][godoc] +[][license] + +[godoc]: https://godoc.org/github.com/yudai/golcs +[license]: https://github.com/yudai/golcs/blob/master/LICENSE + +A package to calculate [LCS](http://en.wikipedia.org/wiki/Longest_common_subsequence_problem) of slices. 
+ +## Usage + +```sh +go get github.com/yudai/golcs +``` + +```go +import " github.com/yudai/golcs" + +left = []interface{}{1, 2, 5, 3, 1, 1, 5, 8, 3} +right = []interface{}{1, 2, 3, 3, 4, 4, 5, 1, 6} + +lcs := golcs.New(left, right) + +lcs.Values() // LCS values => []interface{}{1, 2, 5, 1} +lcs.IndexPairs() // Matched indices => [{Left: 0, Right: 0}, {Left: 1, Right: 1}, {Left: 2, Right: 6}, {Left: 4, Right: 7}] +lcs.Length() // Matched length => 4 + +lcs.Table() // Memo table +``` + +All the methods of `Lcs` cache their return values. For example, the memo table is calculated only once and reused when `Values()`, `Length()` and other methods are called. + + +## FAQ + +### How can I give `[]byte` values to `Lcs()` as its arguments? + +As `[]interface{}` is incompatible with `[]othertype` like `[]byte`, you need to create a `[]interface{}` slice and copy the values in your `[]byte` slice into it. Unfortunately, Go doesn't provide any mesure to cast a slice into `[]interface{}` with zero cost. Your copy costs O(n). + +```go +leftBytes := []byte("TGAGTA") +left = make([]interface{}, len(leftBytes)) +for i, v := range leftBytes { + left[i] = v +} + +rightBytes := []byte("GATA") +right = make([]interface{}, len(rightBytes)) +for i, v := range rightBytes { + right[i] = v +} + +lcs.New(left, right) +``` + + +## LICENSE + +The MIT license (See `LICENSE` for detail) diff --git a/vendor/github.com/yudai/golcs/golcs.go b/vendor/github.com/yudai/golcs/golcs.go new file mode 100644 index 00000000..1dd2d568 --- /dev/null +++ b/vendor/github.com/yudai/golcs/golcs.go @@ -0,0 +1,195 @@ +// package lcs provides functions to calculate Longest Common Subsequence (LCS) +// values from two arbitrary arrays. +package lcs + +import ( + "context" + "reflect" +) + +// Lcs is the interface to calculate the LCS of two arrays. +type Lcs interface { + // Values calculates the LCS value of the two arrays. + Values() (values []interface{}) + // ValueContext is a context aware version of Values() + ValuesContext(ctx context.Context) (values []interface{}, err error) + // IndexPairs calculates paris of indices which have the same value in LCS. + IndexPairs() (pairs []IndexPair) + // IndexPairsContext is a context aware version of IndexPairs() + IndexPairsContext(ctx context.Context) (pairs []IndexPair, err error) + // Length calculates the length of the LCS. + Length() (length int) + // LengthContext is a context aware version of Length() + LengthContext(ctx context.Context) (length int, err error) + // Left returns one of the two arrays to be compared. + Left() (leftValues []interface{}) + // Right returns the other of the two arrays to be compared. + Right() (righttValues []interface{}) +} + +// IndexPair represents an pair of indeices in the Left and Right arrays found in the LCS value. +type IndexPair struct { + Left int + Right int +} + +type lcs struct { + left []interface{} + right []interface{} + /* for caching */ + table [][]int + indexPairs []IndexPair + values []interface{} +} + +// New creates a new LCS calculator from two arrays. 
+func New(left, right []interface{}) Lcs { + return &lcs{ + left: left, + right: right, + table: nil, + indexPairs: nil, + values: nil, + } +} + +// Table implements Lcs.Table() +func (lcs *lcs) Table() (table [][]int) { + table, _ = lcs.TableContext(context.Background()) + return table +} + +// Table implements Lcs.TableContext() +func (lcs *lcs) TableContext(ctx context.Context) (table [][]int, err error) { + if lcs.table != nil { + return lcs.table, nil + } + + sizeX := len(lcs.left) + 1 + sizeY := len(lcs.right) + 1 + + table = make([][]int, sizeX) + for x := 0; x < sizeX; x++ { + table[x] = make([]int, sizeY) + } + + for y := 1; y < sizeY; y++ { + select { // check in each y to save some time + case <-ctx.Done(): + return nil, ctx.Err() + default: + // nop + } + for x := 1; x < sizeX; x++ { + increment := 0 + if reflect.DeepEqual(lcs.left[x-1], lcs.right[y-1]) { + increment = 1 + } + table[x][y] = max(table[x-1][y-1]+increment, table[x-1][y], table[x][y-1]) + } + } + + lcs.table = table + return table, nil +} + +// Table implements Lcs.Length() +func (lcs *lcs) Length() (length int) { + length, _ = lcs.LengthContext(context.Background()) + return length +} + +// Table implements Lcs.LengthContext() +func (lcs *lcs) LengthContext(ctx context.Context) (length int, err error) { + table, err := lcs.TableContext(ctx) + if err != nil { + return 0, err + } + return table[len(lcs.left)][len(lcs.right)], nil +} + +// Table implements Lcs.IndexPairs() +func (lcs *lcs) IndexPairs() (pairs []IndexPair) { + pairs, _ = lcs.IndexPairsContext(context.Background()) + return pairs +} + +// Table implements Lcs.IndexPairsContext() +func (lcs *lcs) IndexPairsContext(ctx context.Context) (pairs []IndexPair, err error) { + if lcs.indexPairs != nil { + return lcs.indexPairs, nil + } + + table, err := lcs.TableContext(ctx) + if err != nil { + return nil, err + } + + pairs = make([]IndexPair, table[len(table)-1][len(table[0])-1]) + + for x, y := len(lcs.left), len(lcs.right); x > 0 && y > 0; { + if reflect.DeepEqual(lcs.left[x-1], lcs.right[y-1]) { + pairs[table[x][y]-1] = IndexPair{Left: x - 1, Right: y - 1} + x-- + y-- + } else { + if table[x-1][y] >= table[x][y-1] { + x-- + } else { + y-- + } + } + } + + lcs.indexPairs = pairs + + return pairs, nil +} + +// Table implements Lcs.Values() +func (lcs *lcs) Values() (values []interface{}) { + values, _ = lcs.ValuesContext(context.Background()) + return values +} + +// Table implements Lcs.ValuesContext() +func (lcs *lcs) ValuesContext(ctx context.Context) (values []interface{}, err error) { + if lcs.values != nil { + return lcs.values, nil + } + + pairs, err := lcs.IndexPairsContext(ctx) + if err != nil { + return nil, err + } + + values = make([]interface{}, len(pairs)) + for i, pair := range pairs { + values[i] = lcs.left[pair.Left] + } + lcs.values = values + + return values, nil +} + +// Table implements Lcs.Left() +func (lcs *lcs) Left() (leftValues []interface{}) { + leftValues = lcs.left + return +} + +// Table implements Lcs.Right() +func (lcs *lcs) Right() (rightValues []interface{}) { + rightValues = lcs.right + return +} + +func max(first int, rest ...int) (max int) { + max = first + for _, value := range rest { + if value > max { + max = value + } + } + return +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 180d50dd..d543c01d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -232,6 +232,12 @@ "revision": "3f09c2282fc5ad74b3d04a485311f3173c2431d3", "revisionTime": "2017-04-26T07:38:02Z" }, + { + "checksumSHA1": 
"U6lX43KDDlNOn+Z0Yyww+ZzHfFo=", + "path": "github.com/mattn/go-isatty", + "revision": "fc9e8d8ef48496124e79ae0df75490096eccf6fe", + "revisionTime": "2017-03-22T23:44:13Z" + }, { "checksumSHA1": "2AyUkWjutec6p+470tgio8mYOxI=", "path": "github.com/opencontainers/go-digest", @@ -244,6 +250,12 @@ "revision": "1b00554d822231195d1babd97ff4a781231955c9", "revisionTime": "2017-01-12T15:04:04Z" }, + { + "checksumSHA1": "v7C+aJ1D/z3MEeCte6bxvpoGjM4=", + "path": "github.com/sergi/go-diff/diffmatchpatch", + "revision": "feef008d51ad2b3778f85d387ccf91735543008d", + "revisionTime": "2017-04-09T07:17:39Z" + }, { "checksumSHA1": "Wgm8Y0np1Usy3+ZTTPFdHoqzed8=", "path": "github.com/spf13/cobra", @@ -268,6 +280,24 @@ "revision": "708a42d246822952f38190a8d8c4e6b16a0e600c", "revisionTime": "2017-03-12T11:21:14Z" }, + { + "checksumSHA1": "r7o16T0WQ/XSe2mlQuioMi8gxbw=", + "path": "github.com/yudai/gojsondiff", + "revision": "9209d1532c51cabe0439993586a71c207b09a0ac", + "revisionTime": "2017-02-27T22:09:00Z" + }, + { + "checksumSHA1": "7/V6fDOOfkmSHQahCK+J5G4Y1uk=", + "path": "github.com/yudai/gojsondiff/formatter", + "revision": "9209d1532c51cabe0439993586a71c207b09a0ac", + "revisionTime": "2017-02-27T22:09:00Z" + }, + { + "checksumSHA1": "5xoBoioS8LWHn9FC0pL+vo7wDjs=", + "path": "github.com/yudai/golcs", + "revision": "ecda9a501e8220fae3b4b600c3db4b0ba22cfc68", + "revisionTime": "2017-03-16T03:48:04Z" + }, { "checksumSHA1": "ZaU56svwLgiJD0y8JOB3+/mpYBA=", "path": "golang.org/x/crypto/ssh/terminal", -- GitLab
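Reviewer note, not part of the vendored sources: a minimal sketch of how the newly vendored gojsondiff and gojsondiff/formatter packages fit together, using only APIs that appear in this patch (Differ.Compare, Diff.Modified, DeltaFormatter.Format). The input JSON documents are made-up placeholders, and error handling is kept deliberately simple.

```go
package main

import (
	"fmt"
	"log"

	diff "github.com/yudai/gojsondiff"
	"github.com/yudai/gojsondiff/formatter"
)

func main() {
	// Two hypothetical JSON documents to compare; Compare unmarshals both sides itself.
	left := []byte(`{"apiVersion": "v1", "replicas": 1}`)
	right := []byte(`{"apiVersion": "v1", "replicas": 3}`)

	d, err := diff.New().Compare(left, right)
	if err != nil {
		log.Fatal(err)
	}

	if !d.Modified() {
		fmt.Println("no differences")
		return
	}

	// Render the deltas in the jsondiffpatch-style JSON delta format.
	f := formatter.NewDeltaFormatter()
	out, err := f.Format(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(out)
}
```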