From b3634c4693d269172a8d51d93991e3f9384213b9 Mon Sep 17 00:00:00 2001
From: Tom Wilkie <tom.wilkie@gmail.com>
Date: Thu, 6 Jul 2017 13:05:11 +0100
Subject: [PATCH] Anonymously import the GCP auth provider to get the
 registration side effect.

---
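Note: the functional change is the single blank import added to cmd/root.go below; the rest of the patch is the vendored dependency tree that import pulls in. The plugin relies on Go's side-effect import pattern: the package registers its auth provider from an init function, so importing it with the blank identifier is enough to make the provider available when client-go builds a client from a kubeconfig that names the gcp auth provider. The sketch below only illustrates that pattern; the package and identifier names (authregistry, gcpplugin, example.com/...) are made up and are not the client-go API.

// file: authregistry/registry.go
package authregistry

// providers holds factories registered by provider packages at init time.
var providers = map[string]func() string{}

// Register is called by provider packages from their init functions.
func Register(name string, factory func() string) { providers[name] = factory }

// Lookup reports whether a provider with the given name has been registered.
func Lookup(name string) (func() string, bool) { f, ok := providers[name]; return f, ok }

// file: gcpplugin/plugin.go
package gcpplugin

import "example.com/authregistry"

// init runs as soon as any other package imports gcpplugin, even anonymously;
// that is the "registration side effect" the subject line refers to.
func init() {
	authregistry.Register("gcp", func() string { return "gcp auth provider" })
}

// file: main.go
package main

import (
	"fmt"

	"example.com/authregistry"
	_ "example.com/gcpplugin" // imported only for its registration side effect
)

func main() {
	_, ok := authregistry.Lookup("gcp")
	fmt.Println("gcp provider registered:", ok)
}
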
 cmd/root.go                                   |    1 +
 vendor/cloud.google.com/go/LICENSE            |  202 ++
 .../go/compute/metadata/metadata.go           |  437 +++
 vendor/github.com/golang/protobuf/LICENSE     |   31 +
 .../github.com/golang/protobuf/proto/Makefile |   43 +
 .../github.com/golang/protobuf/proto/clone.go |  229 ++
 .../golang/protobuf/proto/decode.go           |  970 ++++++
 .../golang/protobuf/proto/encode.go           | 1362 ++++++++
 .../github.com/golang/protobuf/proto/equal.go |  300 ++
 .../golang/protobuf/proto/extensions.go       |  587 ++++
 .../github.com/golang/protobuf/proto/lib.go   |  897 ++++++
 .../golang/protobuf/proto/message_set.go      |  311 ++
 .../golang/protobuf/proto/pointer_reflect.go  |  484 +++
 .../golang/protobuf/proto/pointer_unsafe.go   |  270 ++
 .../golang/protobuf/proto/properties.go       |  872 ++++++
 .../github.com/golang/protobuf/proto/text.go  |  854 +++++
 .../golang/protobuf/proto/text_parser.go      |  895 ++++++
 vendor/golang.org/x/net/context/context.go    |  156 +
 .../x/net/context/ctxhttp/ctxhttp.go          |   74 +
 .../x/net/context/ctxhttp/ctxhttp_pre17.go    |  147 +
 vendor/golang.org/x/net/context/go17.go       |   72 +
 vendor/golang.org/x/net/context/pre_go17.go   |  300 ++
 vendor/golang.org/x/oauth2/AUTHORS            |    3 +
 vendor/golang.org/x/oauth2/CONTRIBUTING.md    |   31 +
 vendor/golang.org/x/oauth2/CONTRIBUTORS       |    3 +
 vendor/golang.org/x/oauth2/LICENSE            |   27 +
 vendor/golang.org/x/oauth2/README.md          |   74 +
 .../golang.org/x/oauth2/client_appengine.go   |   25 +
 .../golang.org/x/oauth2/google/appengine.go   |   89 +
 .../x/oauth2/google/appengine_hook.go         |   14 +
 .../x/oauth2/google/appengineflex_hook.go     |   11 +
 vendor/golang.org/x/oauth2/google/default.go  |  130 +
 vendor/golang.org/x/oauth2/google/google.go   |  202 ++
 vendor/golang.org/x/oauth2/google/jwt.go      |   74 +
 vendor/golang.org/x/oauth2/google/sdk.go      |  172 +
 vendor/golang.org/x/oauth2/internal/oauth2.go |   76 +
 vendor/golang.org/x/oauth2/internal/token.go  |  251 ++
 .../golang.org/x/oauth2/internal/transport.go |   69 +
 vendor/golang.org/x/oauth2/jws/jws.go         |  182 ++
 vendor/golang.org/x/oauth2/jwt/jwt.go         |  159 +
 vendor/golang.org/x/oauth2/oauth2.go          |  340 ++
 vendor/golang.org/x/oauth2/token.go           |  158 +
 vendor/golang.org/x/oauth2/transport.go       |  132 +
 .../appengine/CONTRIBUTING.md                 |   90 +
 vendor/google.golang.org/appengine/LICENSE    |  202 ++
 vendor/google.golang.org/appengine/README.md  |   73 +
 .../google.golang.org/appengine/appengine.go  |  113 +
 .../appengine/appengine_vm.go                 |   20 +
 vendor/google.golang.org/appengine/errors.go  |   46 +
 .../google.golang.org/appengine/identity.go   |  142 +
 .../appengine/internal/api.go                 |  677 ++++
 .../appengine/internal/api_classic.go         |  165 +
 .../appengine/internal/api_common.go          |  123 +
 .../appengine/internal/app_id.go              |   28 +
 .../app_identity/app_identity_service.pb.go   |  296 ++
 .../app_identity/app_identity_service.proto   |   64 +
 .../appengine/internal/base/api_base.pb.go    |  133 +
 .../appengine/internal/base/api_base.proto    |   33 +
 .../internal/datastore/datastore_v3.pb.go     | 2778 +++++++++++++++++
 .../internal/datastore/datastore_v3.proto     |  541 ++++
 .../appengine/internal/identity.go            |   14 +
 .../appengine/internal/identity_classic.go    |   57 +
 .../appengine/internal/identity_vm.go         |  101 +
 .../appengine/internal/internal.go            |  110 +
 .../appengine/internal/log/log_service.pb.go  |  899 ++++++
 .../appengine/internal/log/log_service.proto  |  150 +
 .../appengine/internal/main.go                |   15 +
 .../appengine/internal/main_vm.go             |   48 +
 .../appengine/internal/metadata.go            |   61 +
 .../internal/modules/modules_service.pb.go    |  375 +++
 .../internal/modules/modules_service.proto    |   80 +
 .../appengine/internal/net.go                 |   56 +
 .../appengine/internal/regen.sh               |   40 +
 .../internal/remote_api/remote_api.pb.go      |  231 ++
 .../internal/remote_api/remote_api.proto      |   44 +
 .../appengine/internal/transaction.go         |  107 +
 .../internal/urlfetch/urlfetch_service.pb.go  |  355 +++
 .../internal/urlfetch/urlfetch_service.proto  |   64 +
 .../google.golang.org/appengine/namespace.go  |   25 +
 vendor/google.golang.org/appengine/timeout.go |   20 +
 .../appengine/urlfetch/urlfetch.go            |  210 ++
 .../plugin/pkg/client/auth/gcp/BUILD          |   32 +
 .../plugin/pkg/client/auth/gcp/OWNERS         |    6 +
 .../plugin/pkg/client/auth/gcp/gcp.go         |  308 ++
 .../third_party/forked/golang/template/BUILD  |   17 +
 .../forked/golang/template/exec.go            |   94 +
 .../forked/golang/template/funcs.go           |  599 ++++
 vendor/k8s.io/client-go/util/jsonpath/BUILD   |   31 +
 vendor/k8s.io/client-go/util/jsonpath/doc.go  |   20 +
 .../client-go/util/jsonpath/jsonpath.go       |  509 +++
 vendor/k8s.io/client-go/util/jsonpath/node.go |  255 ++
 .../k8s.io/client-go/util/jsonpath/parser.go  |  449 +++
 vendor/vendor.json                            |  144 +
 93 files changed, 22766 insertions(+)
 create mode 100644 vendor/cloud.google.com/go/LICENSE
 create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go
 create mode 100644 vendor/github.com/golang/protobuf/LICENSE
 create mode 100644 vendor/github.com/golang/protobuf/proto/Makefile
 create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/text.go
 create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go
 create mode 100644 vendor/golang.org/x/net/context/context.go
 create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
 create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
 create mode 100644 vendor/golang.org/x/net/context/go17.go
 create mode 100644 vendor/golang.org/x/net/context/pre_go17.go
 create mode 100644 vendor/golang.org/x/oauth2/AUTHORS
 create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md
 create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS
 create mode 100644 vendor/golang.org/x/oauth2/LICENSE
 create mode 100644 vendor/golang.org/x/oauth2/README.md
 create mode 100644 vendor/golang.org/x/oauth2/client_appengine.go
 create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go
 create mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go
 create mode 100644 vendor/golang.org/x/oauth2/google/appengineflex_hook.go
 create mode 100644 vendor/golang.org/x/oauth2/google/default.go
 create mode 100644 vendor/golang.org/x/oauth2/google/google.go
 create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go
 create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go
 create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go
 create mode 100644 vendor/golang.org/x/oauth2/internal/token.go
 create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go
 create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go
 create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go
 create mode 100644 vendor/golang.org/x/oauth2/oauth2.go
 create mode 100644 vendor/golang.org/x/oauth2/token.go
 create mode 100644 vendor/golang.org/x/oauth2/transport.go
 create mode 100644 vendor/google.golang.org/appengine/CONTRIBUTING.md
 create mode 100644 vendor/google.golang.org/appengine/LICENSE
 create mode 100644 vendor/google.golang.org/appengine/README.md
 create mode 100644 vendor/google.golang.org/appengine/appengine.go
 create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go
 create mode 100644 vendor/google.golang.org/appengine/errors.go
 create mode 100644 vendor/google.golang.org/appengine/identity.go
 create mode 100644 vendor/google.golang.org/appengine/internal/api.go
 create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go
 create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go
 create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go
 create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
 create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto
 create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
 create mode 100755 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
 create mode 100644 vendor/google.golang.org/appengine/internal/identity.go
 create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go
 create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go
 create mode 100644 vendor/google.golang.org/appengine/internal/internal.go
 create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto
 create mode 100644 vendor/google.golang.org/appengine/internal/main.go
 create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go
 create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go
 create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto
 create mode 100644 vendor/google.golang.org/appengine/internal/net.go
 create mode 100755 vendor/google.golang.org/appengine/internal/regen.sh
 create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
 create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go
 create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
 create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
 create mode 100644 vendor/google.golang.org/appengine/namespace.go
 create mode 100644 vendor/google.golang.org/appengine/timeout.go
 create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go
 create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD
 create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS
 create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go
 create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD
 create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go
 create mode 100644 vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/BUILD
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/doc.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/node.go
 create mode 100644 vendor/k8s.io/client-go/util/jsonpath/parser.go

diff --git a/cmd/root.go b/cmd/root.go
index 9e7012c7..08a7579d 100644
--- a/cmd/root.go
+++ b/cmd/root.go
@@ -36,6 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/dynamic"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	"k8s.io/client-go/tools/clientcmd"
 
 	"github.com/ksonnet/kubecfg/utils"
diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE
new file mode 100644
index 00000000..a4c5efd8
--- /dev/null
+++ b/vendor/cloud.google.com/go/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2014 Google Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go
new file mode 100644
index 00000000..e708c031
--- /dev/null
+++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go
@@ -0,0 +1,437 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata // import "cloud.google.com/go/compute/metadata"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+const (
+	// metadataIP is the documented metadata server IP address.
+	metadataIP = "169.254.169.254"
+
+	// metadataHostEnv is the environment variable specifying the
+	// GCE metadata hostname.  If empty, the default value of
+	// metadataIP ("169.254.169.254") is used instead.
+	// This variable name is not defined by any spec, as far as
+	// I know; it was made up for the Go package.
+	metadataHostEnv = "GCE_METADATA_HOST"
+
+	userAgent = "gcloud-golang/0.1"
+)
+
+type cachedValue struct {
+	k    string
+	trim bool
+	mu   sync.Mutex
+	v    string
+}
+
+var (
+	projID  = &cachedValue{k: "project/project-id", trim: true}
+	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+	instID  = &cachedValue{k: "instance/id", trim: true}
+)
+
+var (
+	metaClient = &http.Client{
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+			ResponseHeaderTimeout: 2 * time.Second,
+		},
+	}
+	subscribeClient = &http.Client{
+		Transport: &http.Transport{
+			Dial: (&net.Dialer{
+				Timeout:   2 * time.Second,
+				KeepAlive: 30 * time.Second,
+			}).Dial,
+		},
+	}
+)
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+//
+// If the GCE_METADATA_HOST environment variable is not defined, a default of
+// 169.254.169.254 will be used instead.
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+	val, _, err := getETag(metaClient, suffix)
+	return val, err
+}
+
+// getETag returns a value from the metadata service as well as the associated
+// ETag using the provided client. This func is otherwise equivalent to Get.
+func getETag(client *http.Client, suffix string) (value, etag string, err error) {
+	// Using a fixed IP makes it very difficult to spoof the metadata service in
+	// a container, which is an important use-case for local testing of cloud
+	// deployments. To enable spoofing of the metadata service, the environment
+	// variable GCE_METADATA_HOST is first inspected to decide where metadata
+	// requests shall go.
+	host := os.Getenv(metadataHostEnv)
+	if host == "" {
+		// Using 169.254.169.254 instead of "metadata" here because Go
+		// binaries built with the "netgo" tag and without cgo won't
+		// know the search suffix for "metadata" is
+		// ".google.internal", and this IP address is documented as
+		// being stable anyway.
+		host = metadataIP
+	}
+	url := "http://" + host + "/computeMetadata/v1/" + suffix
+	req, _ := http.NewRequest("GET", url, nil)
+	req.Header.Set("Metadata-Flavor", "Google")
+	req.Header.Set("User-Agent", userAgent)
+	res, err := client.Do(req)
+	if err != nil {
+		return "", "", err
+	}
+	defer res.Body.Close()
+	if res.StatusCode == http.StatusNotFound {
+		return "", "", NotDefinedError(suffix)
+	}
+	if res.StatusCode != 200 {
+		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+	}
+	all, err := ioutil.ReadAll(res.Body)
+	if err != nil {
+		return "", "", err
+	}
+	return string(all), res.Header.Get("Etag"), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+	s, err = Get(suffix)
+	s = strings.TrimSpace(s)
+	return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+	defer c.mu.Unlock()
+	c.mu.Lock()
+	if c.v != "" {
+		return c.v, nil
+	}
+	if c.trim {
+		v, err = getTrimmed(c.k)
+	} else {
+		v, err = Get(c.k)
+	}
+	if err == nil {
+		c.v = v
+	}
+	return
+}
+
+var (
+	onGCEOnce sync.Once
+	onGCE     bool
+)
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+	onGCEOnce.Do(initOnGCE)
+	return onGCE
+}
+
+func initOnGCE() {
+	onGCE = testOnGCE()
+}
+
+func testOnGCE() bool {
+	// The user explicitly said they're on GCE, so trust them.
+	if os.Getenv(metadataHostEnv) != "" {
+		return true
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	resc := make(chan bool, 2)
+
+	// Try two strategies in parallel.
+	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
+	go func() {
+		req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
+		req.Header.Set("User-Agent", userAgent)
+		res, err := ctxhttp.Do(ctx, metaClient, req)
+		if err != nil {
+			resc <- false
+			return
+		}
+		defer res.Body.Close()
+		resc <- res.Header.Get("Metadata-Flavor") == "Google"
+	}()
+
+	go func() {
+		addrs, err := net.LookupHost("metadata.google.internal")
+		if err != nil || len(addrs) == 0 {
+			resc <- false
+			return
+		}
+		resc <- strsContains(addrs, metadataIP)
+	}()
+
+	tryHarder := systemInfoSuggestsGCE()
+	if tryHarder {
+		res := <-resc
+		if res {
+			// The first strategy succeeded, so let's use it.
+			return true
+		}
+		// Wait for either the DNS or metadata server probe to
+		// contradict the other one and say we are running on
+		// GCE. Give it a lot of time to do so, since the system
+		// info already suggests we're running on a GCE BIOS.
+		timer := time.NewTimer(5 * time.Second)
+		defer timer.Stop()
+		select {
+		case res = <-resc:
+			return res
+		case <-timer.C:
+			// Too slow. Who knows what this system is.
+			return false
+		}
+	}
+
+	// There's no hint from the system info that we're running on
+	// GCE, so use the first probe's result as truth, whether it's
+	// true or false. The goal here is to optimize for speed for
+	// users who are NOT running on GCE. We can't assume that
+	// either a DNS lookup or an HTTP request to a blackholed IP
+	// address is fast. Worst case this should return when the
+	// metaClient's Transport.ResponseHeaderTimeout or
+	// Transport.Dial.Timeout fires (in two seconds).
+	return <-resc
+}
+
+// systemInfoSuggestsGCE reports whether the local system (without
+// doing network requests) suggests that we're running on GCE. If this
+// returns true, testOnGCE tries a bit harder to reach its metadata
+// server.
+func systemInfoSuggestsGCE() bool {
+	if runtime.GOOS != "linux" {
+		// We don't have any non-Linux clues available, at least yet.
+		return false
+	}
+	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
+	name := strings.TrimSpace(string(slurp))
+	return name == "Google" || name == "Google Compute Engine"
+}
+
+// Subscribe subscribes to a value from the metadata service.
+// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
+// The suffix may contain query parameters.
+//
+// Subscribe calls fn with the latest metadata value indicated by the provided
+// suffix. If the metadata value is deleted, fn is called with the empty string
+// and ok false. Subscribe blocks until fn returns a non-nil error or the value
+// is deleted. Subscribe returns the error value returned from the last call to
+// fn, which may be nil when ok == false.
+func Subscribe(suffix string, fn func(v string, ok bool) error) error {
+	const failedSubscribeSleep = time.Second * 5
+
+	// First check to see if the metadata value exists at all.
+	val, lastETag, err := getETag(subscribeClient, suffix)
+	if err != nil {
+		return err
+	}
+
+	if err := fn(val, true); err != nil {
+		return err
+	}
+
+	ok := true
+	if strings.ContainsRune(suffix, '?') {
+		suffix += "&wait_for_change=true&last_etag="
+	} else {
+		suffix += "?wait_for_change=true&last_etag="
+	}
+	for {
+		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
+		if err != nil {
+			if _, deleted := err.(NotDefinedError); !deleted {
+				time.Sleep(failedSubscribeSleep)
+				continue // Retry on other errors.
+			}
+			ok = false
+		}
+		lastETag = etag
+
+		if err := fn(val, ok); err != nil || !ok {
+			return err
+		}
+	}
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will be of the form
+// "<instanceID>.c.<projID>.internal".
+func Hostname() (string, error) {
+	return getTrimmed("instance/hostname")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+	var s []string
+	j, err := Get("instance/tags")
+	if err != nil {
+		return nil, err
+	}
+	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+		return nil, err
+	}
+	return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+	return instID.get()
+}
+
+// InstanceName returns the current VM's instance name (the host part of its hostname).
+func InstanceName() (string, error) {
+	host, err := Hostname()
+	if err != nil {
+		return "", err
+	}
+	return strings.Split(host, ".")[0], nil
+}
+
+// Zone returns the current VM's zone, such as "us-central1-b".
+func Zone() (string, error) {
+	zone, err := getTrimmed("instance/zone")
+	// zone is of the form "projects/<projNum>/zones/<zoneName>".
+	if err != nil {
+		return "", err
+	}
+	return zone[strings.LastIndex(zone, "/")+1:], nil
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM.  The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+	j, err := Get(suffix)
+	if err != nil {
+		return nil, err
+	}
+	s := strings.Split(strings.TrimSpace(j), "\n")
+	for i := range s {
+		s[i] = strings.TrimSpace(s[i])
+	}
+	return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+	return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+	return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+	if serviceAccount == "" {
+		serviceAccount = "default"
+	}
+	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
+
+func strsContains(ss []string, s string) bool {
+	for _, v := range ss {
+		if v == s {
+			return true
+		}
+	}
+	return false
+}
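The metadata package above is consumed further up this dependency chain (via golang.org/x/oauth2/google) to detect GCE and read project and instance values. A minimal usage sketch, for orientation only, since kubecfg never calls it directly:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// OnGCE probes the metadata server and its DNS name; off GCE it is
	// designed to return false quickly.
	if !metadata.OnGCE() {
		log.Println("not running on GCE; metadata server unavailable")
		return
	}
	projID, err := metadata.ProjectID() // cached after the first successful fetch
	if err != nil {
		log.Fatal(err)
	}
	scopes, err := metadata.Scopes("default") // scopes of the default service account
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", projID, "scopes:", scopes)
}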
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 00000000..1b1b1921
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors.  All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 00000000..e2e0651a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors.  All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+	go install
+
+test: install generate-test-pbs
+	go test
+
+
+generate-test-pbs:
+	make install
+	make -C testdata
+	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+	make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 00000000..e392575b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,229 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+	in := reflect.ValueOf(pb)
+	if in.IsNil() {
+		return pb
+	}
+
+	out := reflect.New(in.Type().Elem())
+	// out is empty so a merge is a deep copy.
+	mergeStruct(out.Elem(), in.Elem())
+	return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		// Explicit test prior to mergeStruct so that mistyped nils will fail
+		panic("proto: type mismatch")
+	}
+	if in.IsNil() {
+		// Merging nil into non-nil is a quiet no-op
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, ok := extendable(in.Addr().Interface()); ok {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
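The Clone and Merge semantics documented in clone.go can be exercised with any proto message; the stand-in type below is hand-written purely for illustration (generated code would normally provide it), so treat this as a sketch rather than typical usage:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a hand-written stand-in for a generated message type.
type Example struct {
	Name string   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Tags []string `protobuf:"bytes,2,rep,name=tags" json:"tags,omitempty"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	src := &Example{Name: "a", Tags: []string{"x"}}

	// Clone returns a deep copy: mutating the copy leaves src untouched.
	dup := proto.Clone(src).(*Example)
	dup.Tags[0] = "y"

	// Merge appends repeated fields and copies scalar fields that are set in src.
	dst := &Example{Tags: []string{"z"}}
	proto.Merge(dst, src)

	fmt.Println(src.Tags[0], dup.Tags[0], dst.Name, dst.Tags) // x y a [z x]
}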
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 00000000..aa207298
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,970 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
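+
+// A minimal decoding sketch: the payload is preceded by its varint length, so
+// 03 66 6F 6F decodes to []byte("foo"). With alloc=false the returned slice
+// aliases the Buffer's backing array instead of being copied.
+//
+//	b := proto.NewBuffer([]byte{0x03, 'f', 'o', 'o'})
+//	raw, err := b.DecodeRawBytes(true) // raw == []byte("foo")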
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+	oi := o.index
+
+	err := o.skip(t, tag, wire)
+	if err != nil {
+		return err
+	}
+
+	if !unrecField.IsValid() {
+		return nil
+	}
+
+	ptr := structPointer_Bytes(base, unrecField)
+
+	// Add the skipped field to struct field
+	obuf := o.buf
+
+	o.buf = *ptr
+	o.EncodeVarint(uint64(tag<<3 | wire))
+	*ptr = append(o.buf, obuf[oi:o.index]...)
+
+	o.buf = obuf
+
+	return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+	var u uint64
+	var err error
+
+	switch wire {
+	case WireVarint:
+		_, err = o.DecodeVarint()
+	case WireFixed64:
+		_, err = o.DecodeFixed64()
+	case WireBytes:
+		_, err = o.DecodeRawBytes(false)
+	case WireFixed32:
+		_, err = o.DecodeFixed32()
+	case WireStartGroup:
+		for {
+			u, err = o.DecodeVarint()
+			if err != nil {
+				break
+			}
+			fwire := int(u & 0x7)
+			if fwire == WireEndGroup {
+				break
+			}
+			ftag := int(u >> 3)
+			err = o.skip(t, ftag, fwire)
+			if err != nil {
+				break
+			}
+		}
+	default:
+		err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+	}
+	return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The method should reset the receiver before
+// decoding starts.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
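+
+// A minimal usage sketch, assuming a hypothetical generated message type
+// pb.Example and wire-format byte slices data and more:
+//
+//	var msg pb.Example
+//	if err := proto.Unmarshal(data, &msg); err != nil {
+//		// handle the error
+//	}
+//	// To merge into fields that are already populated instead of resetting:
+//	err := proto.UnmarshalMerge(more, &msg)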
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	typ, base, err := getbase(pb)
+	if err != nil {
+		return err
+	}
+	return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(Unmarshaler); ok {
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	typ, base, err := getbase(pb)
+	if err != nil {
+		return err
+	}
+
+	err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+	if collectStats {
+		stats.Decode++
+	}
+
+	return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+	var state errorState
+	required, reqFields := prop.reqCount, uint64(0)
+
+	var err error
+	for err == nil && o.index < len(o.buf) {
+		oi := o.index
+		var u uint64
+		u, err = o.DecodeVarint()
+		if err != nil {
+			break
+		}
+		wire := int(u & 0x7)
+		if wire == WireEndGroup {
+			if is_group {
+				if required > 0 {
+					// Not enough information to determine the exact field.
+					// (See below.)
+					return &RequiredNotSetError{"{Unknown}"}
+				}
+				return nil // input is satisfied
+			}
+			return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+		}
+		tag := int(u >> 3)
+		if tag <= 0 {
+			return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+		}
+		fieldnum, ok := prop.decoderTags.get(tag)
+		if !ok {
+			// Maybe it's an extension?
+			if prop.extendable {
+				if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {
+					if err = o.skip(st, tag, wire); err == nil {
+						extmap := e.extensionsWrite()
+						ext := extmap[int32(tag)] // may be missing
+						ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+						extmap[int32(tag)] = ext
+					}
+					continue
+				}
+			}
+			// Maybe it's a oneof?
+			if prop.oneofUnmarshaler != nil {
+				m := structPointer_Interface(base, st).(Message)
+				// First return value indicates whether tag is a oneof field.
+				ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+				if err == ErrInternalBadWireType {
+					// Map the error to something more descriptive.
+					// Do the formatting here to save generated code space.
+					err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+				}
+				if ok {
+					continue
+				}
+			}
+			err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+			continue
+		}
+		p := prop.Prop[fieldnum]
+
+		if p.dec == nil {
+			fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+			continue
+		}
+		dec := p.dec
+		if wire != WireStartGroup && wire != p.WireType {
+			if wire == WireBytes && p.packedDec != nil {
+				// a packable field
+				dec = p.packedDec
+			} else {
+				err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+				continue
+			}
+		}
+		decErr := dec(o, p, base)
+		if decErr != nil && !state.shouldContinue(decErr, p) {
+			err = decErr
+		}
+		if err == nil && p.Required {
+			// Successfully decoded a required field.
+			if tag <= 64 {
+				// use bitmap for fields 1-64 to catch field reuse.
+				var mask uint64 = 1 << uint64(tag-1)
+				if reqFields&mask == 0 {
+					// new required field
+					reqFields |= mask
+					required--
+				}
+			} else {
+				// This is imprecise. It can be fooled by a required field
+				// with a tag > 64 that is encoded twice; that's very rare.
+				// A fully correct implementation would require allocating
+				// a data structure, which we would like to avoid.
+				required--
+			}
+		}
+	}
+	if err == nil {
+		if is_group {
+			return io.ErrUnexpectedEOF
+		}
+		if state.err != nil {
+			return state.err
+		}
+		if required > 0 {
+			// Not enough information to determine the exact field. If we use extra
+			// CPU, we could determine the field only if the missing required field
+			// has a tag <= 64 and we check reqFields.
+			return &RequiredNotSetError{"{Unknown}"}
+		}
+	}
+	return err
+}
+
+// Individual type decoders
+// For each,
+//	u is the decoded value,
+//	v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+	boolPoolSize   = 16
+	uint32PoolSize = 8
+	uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	if len(o.bools) == 0 {
+		o.bools = make([]bool, boolPoolSize)
+	}
+	o.bools[0] = u != 0
+	*structPointer_Bool(base, p.field) = &o.bools[0]
+	o.bools = o.bools[1:]
+	return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	*structPointer_BoolVal(base, p.field) = u != 0
+	return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+	return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+	return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word64_Set(structPointer_Word64(base, p.field), o, u)
+	return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+	return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+	s, err := o.DecodeStringBytes()
+	if err != nil {
+		return err
+	}
+	*structPointer_String(base, p.field) = &s
+	return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+	s, err := o.DecodeStringBytes()
+	if err != nil {
+		return err
+	}
+	*structPointer_StringVal(base, p.field) = s
+	return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return err
+	}
+	*structPointer_Bytes(base, p.field) = b
+	return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	v := structPointer_BoolSlice(base, p.field)
+	*v = append(*v, u != 0)
+	return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+	v := structPointer_BoolSlice(base, p.field)
+
+	nn, err := o.DecodeVarint()
+	if err != nil {
+		return err
+	}
+	nb := int(nn) // number of bytes of encoded bools
+	fin := o.index + nb
+	if fin < o.index {
+		return errOverflow
+	}
+
+	y := *v
+	for o.index < fin {
+		u, err := p.valDec(o)
+		if err != nil {
+			return err
+		}
+		y = append(y, u != 0)
+	}
+
+	*v = y
+	return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+	structPointer_Word32Slice(base, p.field).Append(uint32(u))
+	return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+	v := structPointer_Word32Slice(base, p.field)
+
+	nn, err := o.DecodeVarint()
+	if err != nil {
+		return err
+	}
+	nb := int(nn) // number of bytes of encoded int32s
+
+	fin := o.index + nb
+	if fin < o.index {
+		return errOverflow
+	}
+	for o.index < fin {
+		u, err := p.valDec(o)
+		if err != nil {
+			return err
+		}
+		v.Append(uint32(u))
+	}
+	return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+	u, err := p.valDec(o)
+	if err != nil {
+		return err
+	}
+
+	structPointer_Word64Slice(base, p.field).Append(u)
+	return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+	v := structPointer_Word64Slice(base, p.field)
+
+	nn, err := o.DecodeVarint()
+	if err != nil {
+		return err
+	}
+	nb := int(nn) // number of bytes of encoded int64s
+
+	fin := o.index + nb
+	if fin < o.index {
+		return errOverflow
+	}
+	for o.index < fin {
+		u, err := p.valDec(o)
+		if err != nil {
+			return err
+		}
+		v.Append(u)
+	}
+	return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+	s, err := o.DecodeStringBytes()
+	if err != nil {
+		return err
+	}
+	v := structPointer_StringSlice(base, p.field)
+	*v = append(*v, s)
+	return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return err
+	}
+	v := structPointer_BytesSlice(base, p.field)
+	*v = append(*v, b)
+	return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+	raw, err := o.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	oi := o.index       // index at the end of this map entry
+	o.index -= len(raw) // move buffer back to start of map entry
+
+	mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+	if mptr.Elem().IsNil() {
+		mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+	}
+	v := mptr.Elem() // map[K]V
+
+	// Prepare addressable doubly-indirect placeholders for the key and value types.
+	// See enc_new_map for why.
+	keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+	keybase := toStructPointer(keyptr.Addr())                  // **K
+
+	var valbase structPointer
+	var valptr reflect.Value
+	switch p.mtype.Elem().Kind() {
+	case reflect.Slice:
+		// []byte
+		var dummy []byte
+		valptr = reflect.ValueOf(&dummy)  // *[]byte
+		valbase = toStructPointer(valptr) // *[]byte
+	case reflect.Ptr:
+		// message; valptr is **Msg; need to allocate the intermediate pointer
+		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+		valptr.Set(reflect.New(valptr.Type().Elem()))
+		valbase = toStructPointer(valptr)
+	default:
+		// everything else
+		valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+		valbase = toStructPointer(valptr.Addr())                   // **V
+	}
+
+	// Decode.
+	// This parses a restricted wire format, namely the encoding of a message
+	// with two fields. See enc_new_map for the format.
+	for o.index < oi {
+		// The tagcodes for the key and value properties are always a single byte
+		// because they have tags 1 and 2.
+		tagcode := o.buf[o.index]
+		o.index++
+		switch tagcode {
+		case p.mkeyprop.tagcode[0]:
+			if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+				return err
+			}
+		case p.mvalprop.tagcode[0]:
+			if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+				return err
+			}
+		default:
+			// TODO: Should we silently skip this instead?
+			return fmt.Errorf("proto: bad map data tag %d", raw[0])
+		}
+	}
+	keyelem, valelem := keyptr.Elem(), valptr.Elem()
+	if !keyelem.IsValid() {
+		keyelem = reflect.Zero(p.mtype.Key())
+	}
+	if !valelem.IsValid() {
+		valelem = reflect.Zero(p.mtype.Elem())
+	}
+
+	v.SetMapIndex(keyelem, valelem)
+	return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+	bas := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(bas) {
+		// allocate new nested message
+		bas = toStructPointer(reflect.New(p.stype))
+		structPointer_SetStructPointer(base, p.field, bas)
+	}
+	return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+	raw, e := o.DecodeRawBytes(false)
+	if e != nil {
+		return e
+	}
+
+	bas := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(bas) {
+		// allocate new nested message
+		bas = toStructPointer(reflect.New(p.stype))
+		structPointer_SetStructPointer(base, p.field, bas)
+	}
+
+	// If the object can unmarshal itself, let it.
+	if p.isUnmarshaler {
+		iv := structPointer_Interface(bas, p.stype)
+		return iv.(Unmarshaler).Unmarshal(raw)
+	}
+
+	obuf := o.buf
+	oi := o.index
+	o.buf = raw
+	o.index = 0
+
+	err = o.unmarshalType(p.stype, p.sprop, false, bas)
+	o.buf = obuf
+	o.index = oi
+
+	return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+	return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+	return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+	v := reflect.New(p.stype)
+	bas := toStructPointer(v)
+	structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+	if is_group {
+		err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+		return err
+	}
+
+	raw, err := o.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+
+	// If the object can unmarshal itself, let it.
+	if p.isUnmarshaler {
+		iv := v.Interface()
+		return iv.(Unmarshaler).Unmarshal(raw)
+	}
+
+	obuf := o.buf
+	oi := o.index
+	o.buf = raw
+	o.index = 0
+
+	err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+	o.buf = obuf
+	o.index = oi
+
+	return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 00000000..2b30f846
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1362 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+	field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// maxMarshalSize is the largest allowed size of an encoded protobuf,
+// since C++ and Java use signed int32s for the size.
+const maxMarshalSize = 1<<31 - 1
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
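+
+// A minimal sketch: 150 is 0b1001_0110, so its varint form is the low seven
+// bits with the continuation bit set, followed by the remaining high bit.
+//
+//	proto.EncodeVarint(150) // == []byte{0x96, 0x01}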
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+func sizeFixed64(x uint64) int {
+	return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+func sizeFixed32(x uint64) int {
+	return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+	return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+	return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
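+
+// A minimal sketch: -64 is the most negative sint value that still fits in a
+// single zigzag-encoded byte, since zigzag(-64) == 127.
+//
+//	b := proto.NewBuffer(nil)
+//	v := int64(-64)
+//	b.EncodeZigzag64(uint64(v)) // appends the single byte 0x7F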
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+func sizeRawBytes(b []byte) int {
+	return sizeVarint(uint64(len(b))) +
+		len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+func sizeStringBytes(s string) int {
+	return sizeVarint(uint64(len(s))) +
+		len(s)
+}
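+
+// A minimal sketch: strings share the length-delimited wire form used for bytes
+// fields, so encoding "hi" appends a one-byte length followed by the payload.
+//
+//	b := proto.NewBuffer(nil)
+//	b.EncodeStringBytes("hi") // appends 0x02 'h' 'i'; sizeStringBytes("hi") == 3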
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+	// Can the object marshal itself?
+	if m, ok := pb.(Marshaler); ok {
+		return m.Marshal()
+	}
+	p := NewBuffer(nil)
+	err := p.Marshal(pb)
+	if p.buf == nil && err == nil {
+		// Return a non-nil slice on success.
+		return []byte{}, nil
+	}
+	return p.buf, err
+}
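+
+// A minimal usage sketch, assuming a hypothetical proto2-style generated
+// message type pb.Example with an optional string field Name:
+//
+//	data, err := proto.Marshal(&pb.Example{Name: proto.String("x")})
+//	if err != nil {
+//		// handle the error
+//	}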
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	t, base, err := getbase(pb)
+	if structPointer_IsNil(base) {
+		return ErrNil
+	}
+	if err == nil {
+		var state errorState
+		err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+	}
+	return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+	// Can the object marshal itself?
+	if m, ok := pb.(Marshaler); ok {
+		data, err := m.Marshal()
+		p.buf = append(p.buf, data...)
+		return err
+	}
+
+	t, base, err := getbase(pb)
+	if structPointer_IsNil(base) {
+		return ErrNil
+	}
+	if err == nil {
+		err = p.enc_struct(GetProperties(t.Elem()), base)
+	}
+
+	if collectStats {
+		(stats).Encode++ // Parens are to work around a goimports bug.
+	}
+
+	if len(p.buf) > maxMarshalSize {
+		return ErrTooLarge
+	}
+	return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+	// Can the object marshal itself?  If so, Size is slow.
+	// TODO: add Size to Marshaler, or add a Sizer interface.
+	if m, ok := pb.(Marshaler); ok {
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	t, base, err := getbase(pb)
+	if structPointer_IsNil(base) {
+		return 0
+	}
+	if err == nil {
+		n = size_struct(GetProperties(t.Elem()), base)
+	}
+
+	if collectStats {
+		(stats).Size++ // Parens are to work around a goimports bug.
+	}
+
+	return
+}
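+
+// Note that, for messages that do not implement Marshaler, Size reports the
+// same byte count that a successful Marshal would produce:
+//
+//	n := proto.Size(msg)
+//	data, _ := proto.Marshal(msg)
+//	// on success, len(data) == n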
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+	v := *structPointer_Bool(base, p.field)
+	if v == nil {
+		return ErrNil
+	}
+	x := 0
+	if *v {
+		x = 1
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+	v := *structPointer_BoolVal(base, p.field)
+	if !v {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, 1)
+	return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+	v := *structPointer_Bool(base, p.field)
+	if v == nil {
+		return 0
+	}
+	return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+	v := *structPointer_BoolVal(base, p.field)
+	if !v && !p.oneof {
+		return 0
+	}
+	return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return ErrNil
+	}
+	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+	v := structPointer_Word32Val(base, p.field)
+	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+	if x == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return 0
+	}
+	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32Val(base, p.field)
+	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+	if x == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return ErrNil
+	}
+	x := word32_Get(v)
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+	v := structPointer_Word32Val(base, p.field)
+	x := word32Val_Get(v)
+	if x == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, uint64(x))
+	return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32(base, p.field)
+	if word32_IsNil(v) {
+		return 0
+	}
+	x := word32_Get(v)
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word32Val(base, p.field)
+	x := word32Val_Get(v)
+	if x == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += p.valSize(uint64(x))
+	return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+	v := structPointer_Word64(base, p.field)
+	if word64_IsNil(v) {
+		return ErrNil
+	}
+	x := word64_Get(v)
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, x)
+	return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+	v := structPointer_Word64Val(base, p.field)
+	x := word64Val_Get(v)
+	if x == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	p.valEnc(o, x)
+	return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word64(base, p.field)
+	if word64_IsNil(v) {
+		return 0
+	}
+	x := word64_Get(v)
+	n += len(p.tagcode)
+	n += p.valSize(x)
+	return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+	v := structPointer_Word64Val(base, p.field)
+	x := word64Val_Get(v)
+	if x == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += p.valSize(x)
+	return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+	v := *structPointer_String(base, p.field)
+	if v == nil {
+		return ErrNil
+	}
+	x := *v
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeStringBytes(x)
+	return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+	v := *structPointer_StringVal(base, p.field)
+	if v == "" {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeStringBytes(v)
+	return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+	v := *structPointer_String(base, p.field)
+	if v == nil {
+		return 0
+	}
+	x := *v
+	n += len(p.tagcode)
+	n += sizeStringBytes(x)
+	return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+	v := *structPointer_StringVal(base, p.field)
+	if v == "" && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeStringBytes(v)
+	return
+}
+
+// All protocol buffer fields are nillable, but be careful: only interface, map,
+// pointer, and slice values can actually be nil.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+	var state errorState
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+
+	// Can the object marshal itself?
+	if p.isMarshaler {
+		m := structPointer_Interface(structp, p.stype).(Marshaler)
+		data, err := m.Marshal()
+		if err != nil && !state.shouldContinue(err, nil) {
+			return err
+		}
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(data)
+		return state.err
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+
+	// Can the object marshal itself?
+	if p.isMarshaler {
+		m := structPointer_Interface(structp, p.stype).(Marshaler)
+		data, _ := m.Marshal()
+		n0 := len(p.tagcode)
+		n1 := sizeRawBytes(data)
+		return n0 + n1
+	}
+
+	n0 := len(p.tagcode)
+	n1 := size_struct(p.sprop, structp)
+	n2 := sizeVarint(uint64(n1)) // size of encoded length
+	return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+	var state errorState
+	b := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(b) {
+		return ErrNil
+	}
+
+	o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+	err := o.enc_struct(p.sprop, b)
+	if err != nil && !state.shouldContinue(err, nil) {
+		return err
+	}
+	o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+	return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+	b := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(b) {
+		return 0
+	}
+
+	n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+	n += size_struct(p.sprop, b)
+	n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+	return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return ErrNil
+	}
+	for _, x := range s {
+		o.buf = append(o.buf, p.tagcode...)
+		v := uint64(0)
+		if x {
+			v = 1
+		}
+		p.valEnc(o, v)
+	}
+	return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return 0
+	}
+	return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+	for _, x := range s {
+		v := uint64(0)
+		if x {
+			v = 1
+		}
+		p.valEnc(o, v)
+	}
+	return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+	s := *structPointer_BoolSlice(base, p.field)
+	l := len(s)
+	if l == 0 {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(l))
+	n += l // each bool takes exactly one byte
+	return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+	s := *structPointer_Bytes(base, p.field)
+	if s == nil {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(s)
+	return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+	s := *structPointer_Bytes(base, p.field)
+	if len(s) == 0 {
+		return ErrNil
+	}
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeRawBytes(s)
+	return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+	s := *structPointer_Bytes(base, p.field)
+	if s == nil && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeRawBytes(s)
+	return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+	s := *structPointer_Bytes(base, p.field)
+	if len(s) == 0 && !p.oneof {
+		return 0
+	}
+	n += len(p.tagcode)
+	n += sizeRawBytes(s)
+	return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		p.valEnc(o, uint64(x))
+	}
+	return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	for i := 0; i < l; i++ {
+		n += len(p.tagcode)
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		n += p.valSize(uint64(x))
+	}
+	return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	// TODO: Reuse a Buffer.
+	buf := NewBuffer(nil)
+	for i := 0; i < l; i++ {
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		p.valEnc(buf, uint64(x))
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(len(buf.buf)))
+	o.buf = append(o.buf, buf.buf...)
+	return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	var bufSize int
+	for i := 0; i < l; i++ {
+		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+		bufSize += p.valSize(uint64(x))
+	}
+
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(bufSize))
+	n += bufSize
+	return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		x := s.Index(i)
+		p.valEnc(o, uint64(x))
+	}
+	return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	for i := 0; i < l; i++ {
+		n += len(p.tagcode)
+		x := s.Index(i)
+		n += p.valSize(uint64(x))
+	}
+	return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	// TODO: Reuse a Buffer.
+	buf := NewBuffer(nil)
+	for i := 0; i < l; i++ {
+		p.valEnc(buf, uint64(s.Index(i)))
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(len(buf.buf)))
+	o.buf = append(o.buf, buf.buf...)
+	return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word32Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	var bufSize int
+	for i := 0; i < l; i++ {
+		bufSize += p.valSize(uint64(s.Index(i)))
+	}
+
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(bufSize))
+	n += bufSize
+	return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		p.valEnc(o, s.Index(i))
+	}
+	return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	for i := 0; i < l; i++ {
+		n += len(p.tagcode)
+		n += p.valSize(s.Index(i))
+	}
+	return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return ErrNil
+	}
+	// TODO: Reuse a Buffer.
+	buf := NewBuffer(nil)
+	for i := 0; i < l; i++ {
+		p.valEnc(buf, s.Index(i))
+	}
+
+	o.buf = append(o.buf, p.tagcode...)
+	o.EncodeVarint(uint64(len(buf.buf)))
+	o.buf = append(o.buf, buf.buf...)
+	return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+	s := structPointer_Word64Slice(base, p.field)
+	l := s.Len()
+	if l == 0 {
+		return 0
+	}
+	var bufSize int
+	for i := 0; i < l; i++ {
+		bufSize += p.valSize(s.Index(i))
+	}
+
+	n += len(p.tagcode)
+	n += sizeVarint(uint64(bufSize))
+	n += bufSize
+	return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+	ss := *structPointer_BytesSlice(base, p.field)
+	l := len(ss)
+	if l == 0 {
+		return ErrNil
+	}
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeRawBytes(ss[i])
+	}
+	return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+	ss := *structPointer_BytesSlice(base, p.field)
+	l := len(ss)
+	if l == 0 {
+		return 0
+	}
+	n += l * len(p.tagcode)
+	for i := 0; i < l; i++ {
+		n += sizeRawBytes(ss[i])
+	}
+	return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+	ss := *structPointer_StringSlice(base, p.field)
+	l := len(ss)
+	for i := 0; i < l; i++ {
+		o.buf = append(o.buf, p.tagcode...)
+		o.EncodeStringBytes(ss[i])
+	}
+	return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+	ss := *structPointer_StringSlice(base, p.field)
+	l := len(ss)
+	n += l * len(p.tagcode)
+	for i := 0; i < l; i++ {
+		n += sizeStringBytes(ss[i])
+	}
+	return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+	var state errorState
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	for i := 0; i < l; i++ {
+		structp := s.Index(i)
+		if structPointer_IsNil(structp) {
+			return errRepeatedHasNil
+		}
+
+		// Can the object marshal itself?
+		if p.isMarshaler {
+			m := structPointer_Interface(structp, p.stype).(Marshaler)
+			data, err := m.Marshal()
+			if err != nil && !state.shouldContinue(err, nil) {
+				return err
+			}
+			o.buf = append(o.buf, p.tagcode...)
+			o.EncodeRawBytes(data)
+			continue
+		}
+
+		o.buf = append(o.buf, p.tagcode...)
+		err := o.enc_len_struct(p.sprop, structp, &state)
+		if err != nil && !state.shouldContinue(err, nil) {
+			if err == ErrNil {
+				return errRepeatedHasNil
+			}
+			return err
+		}
+	}
+	return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+	n += l * len(p.tagcode)
+	for i := 0; i < l; i++ {
+		structp := s.Index(i)
+		if structPointer_IsNil(structp) {
+			return // return the size up to this point
+		}
+
+		// Can the object marshal itself?
+		if p.isMarshaler {
+			m := structPointer_Interface(structp, p.stype).(Marshaler)
+			data, _ := m.Marshal()
+			n += sizeRawBytes(data)
+			continue
+		}
+
+		n0 := size_struct(p.sprop, structp)
+		n1 := sizeVarint(uint64(n0)) // size of encoded length
+		n += n0 + n1
+	}
+	return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+	var state errorState
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	for i := 0; i < l; i++ {
+		b := s.Index(i)
+		if structPointer_IsNil(b) {
+			return errRepeatedHasNil
+		}
+
+		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+		err := o.enc_struct(p.sprop, b)
+
+		if err != nil && !state.shouldContinue(err, nil) {
+			if err == ErrNil {
+				return errRepeatedHasNil
+			}
+			return err
+		}
+
+		o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+	}
+	return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+	s := structPointer_StructPointerSlice(base, p.field)
+	l := s.Len()
+
+	n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+	n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+	for i := 0; i < l; i++ {
+		b := s.Index(i)
+		if structPointer_IsNil(b) {
+			return // return size up to this point
+		}
+
+		n += size_struct(p.sprop, b)
+	}
+	return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+	exts := structPointer_ExtMap(base, p.field)
+	if err := encodeExtensionsMap(*exts); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+	exts := structPointer_Extensions(base, p.field)
+
+	v, mu := exts.extensionsRead()
+	if v == nil {
+		return nil
+	}
+
+	mu.Lock()
+	defer mu.Unlock()
+	if err := encodeExtensionsMap(v); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+	// Fast-path for common cases: zero or one extensions.
+	if len(v) <= 1 {
+		for _, e := range v {
+			o.buf = append(o.buf, e.enc...)
+		}
+		return nil
+	}
+
+	// Sort keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(v))
+	for k := range v {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		o.buf = append(o.buf, v[int32(k)].enc...)
+	}
+	return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+	v := structPointer_ExtMap(base, p.field)
+	return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+	v := structPointer_Extensions(base, p.field)
+	return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+	var state errorState // XXX: or do we need to plumb this through?
+
+	/*
+		A map defined as
+			map<key_type, value_type> map_field = N;
+		is encoded in the same way as
+			message MapFieldEntry {
+				key_type key = 1;
+				value_type value = 2;
+			}
+			repeated MapFieldEntry map_field = N;
+	*/
+
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+	if v.Len() == 0 {
+		return nil
+	}
+
+	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+	enc := func() error {
+		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+			return err
+		}
+		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+			return err
+		}
+		return nil
+	}
+
+	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+	for _, key := range v.MapKeys() {
+		val := v.MapIndex(key)
+
+		keycopy.Set(key)
+		valcopy.Set(val)
+
+		o.buf = append(o.buf, p.tagcode...)
+		if err := o.enc_len_thing(enc, &state); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+	n := 0
+	for _, key := range v.MapKeys() {
+		val := v.MapIndex(key)
+		keycopy.Set(key)
+		valcopy.Set(val)
+
+		// Tag codes for key and val are the responsibility of the sub-sizer.
+		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+		valsize := p.mvalprop.size(p.mvalprop, valbase)
+		entry := keysize + valsize
+		// Add on tag code and length of map entry itself.
+		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+	}
+	return n
+}
+
+// mapEncodeScratch returns addressable scratch reflect.Values for the map's key
+// and value types, plus structPointers wrapping them, suitable for passing to an
+// encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+	// Prepare addressable doubly-indirect placeholders for the key and value types.
+	// This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+	keycopy = reflect.New(mapType.Key()).Elem()                 // addressable K
+	keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+	keyptr.Set(keycopy.Addr())                                  //
+	keybase = toStructPointer(keyptr.Addr())                    // **K
+
+	// Value types are more varied and require special handling.
+	switch mapType.Elem().Kind() {
+	case reflect.Slice:
+		// []byte
+		var dummy []byte
+		valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+		valbase = toStructPointer(valcopy.Addr())
+	case reflect.Ptr:
+		// message; the generated field type is map[K]*Msg (so V is *Msg),
+		// so we only need one level of indirection.
+		valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+		valbase = toStructPointer(valcopy.Addr())
+	default:
+		// everything else
+		valcopy = reflect.New(mapType.Elem()).Elem()                // addressable V
+		valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+		valptr.Set(valcopy.Addr())                                  //
+		valbase = toStructPointer(valptr.Addr())                    // **V
+	}
+	return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+	var state errorState
+	// Encode fields in tag order so that decoders may use optimizations
+	// that depend on the ordering.
+	// https://developers.google.com/protocol-buffers/docs/encoding#order
+	for _, i := range prop.order {
+		p := prop.Prop[i]
+		if p.enc != nil {
+			err := p.enc(o, p, base)
+			if err != nil {
+				if err == ErrNil {
+					if p.Required && state.err == nil {
+						state.err = &RequiredNotSetError{p.Name}
+					}
+				} else if err == errRepeatedHasNil {
+					// Give more context to nil values in repeated fields.
+					return errors.New("repeated field " + p.OrigName + " has nil element")
+				} else if !state.shouldContinue(err, p) {
+					return err
+				}
+			}
+			if len(o.buf) > maxMarshalSize {
+				return ErrTooLarge
+			}
+		}
+	}
+
+	// Do oneof fields.
+	if prop.oneofMarshaler != nil {
+		m := structPointer_Interface(base, prop.stype).(Message)
+		if err := prop.oneofMarshaler(m, o); err == ErrNil {
+			return errOneofHasNil
+		} else if err != nil {
+			return err
+		}
+	}
+
+	// Add unrecognized fields at the end.
+	if prop.unrecField.IsValid() {
+		v := *structPointer_Bytes(base, prop.unrecField)
+		if len(o.buf)+len(v) > maxMarshalSize {
+			return ErrTooLarge
+		}
+		if len(v) > 0 {
+			o.buf = append(o.buf, v...)
+		}
+	}
+
+	return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+	for _, i := range prop.order {
+		p := prop.Prop[i]
+		if p.size != nil {
+			n += p.size(p, base)
+		}
+	}
+
+	// Add unrecognized fields at the end.
+	if prop.unrecField.IsValid() {
+		v := *structPointer_Bytes(base, prop.unrecField)
+		n += len(v)
+	}
+
+	// Factor in any oneof fields.
+	if prop.oneofSizer != nil {
+		m := structPointer_Interface(base, prop.stype).(Message)
+		n += prop.oneofSizer(m)
+	}
+
+	return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+	return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+	iLen := len(o.buf)
+	o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+	iMsg := len(o.buf)
+	err := enc()
+	if err != nil && !state.shouldContinue(err, nil) {
+		return err
+	}
+	lMsg := len(o.buf) - iMsg
+	lLen := sizeVarint(uint64(lMsg))
+	switch x := lLen - (iMsg - iLen); {
+	case x > 0: // actual length is x bytes larger than the space we reserved
+		// Move msg x bytes right.
+		o.buf = append(o.buf, zeroes[:x]...)
+		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+	case x < 0: // actual length is x bytes smaller than the space we reserved
+		// Move msg x bytes left.
+		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+		o.buf = o.buf[:len(o.buf)+x] // x is negative
+	}
+	// Encode the length in the reserved space.
+	o.buf = o.buf[:iLen]
+	o.EncodeVarint(uint64(lMsg))
+	o.buf = o.buf[:len(o.buf)+lMsg]
+	return state.err
+}
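+
+// A worked example of the length patching above: four bytes are reserved, so a
+// 10-byte message needs only a one-byte length, x == 1-4 == -3, and the message
+// is shifted three bytes left; a message longer than 2^28-1 bytes needs a
+// five-byte length, x == +1, and it is shifted one byte right.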
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+	err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+	// Ignore unset required fields.
+	reqNotSet, ok := err.(*RequiredNotSetError)
+	if !ok {
+		return false
+	}
+	if s.err == nil {
+		if prop != nil {
+			err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+		}
+		s.err = err
+	}
+	return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 00000000..2ed1cf59
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extensions sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
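+
+// Usage sketch: assuming a hypothetical generated message type pb.Test with a
+// Label field (as in the package comment in lib.go), Equal compares messages
+// field by field rather than by pointer identity:
+//
+//	a := &pb.Test{Label: proto.String("hello")}
+//	b := &pb.Test{Label: proto.String("hello")}
+//	fmt.Println(proto.Equal(a, b)) // true
+//	b.Label = proto.String("bye")
+//	fmt.Println(proto.Equal(a, b)) // false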
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			b1, ok := f1.Interface().(raw)
+			if ok {
+				b2 := f2.Interface().(raw)
+				// RawMessage
+				if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+					return false
+				}
+				continue
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	if !bytes.Equal(u1, u2) {
+		return false
+	}
+
+	return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1, m2 := e1.value, e2.value
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			continue
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 00000000..eaad2183
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,587 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, bool) {
+	if ep, ok := p.(extendableProto); ok {
+		return ep, ok
+	}
+	if ep, ok := p.(extendableProtoV1); ok {
+		return extensionAdapter{ep}, ok
+	}
+	return nil, false
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported extensionsWrite and extensionsRead methods, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc  *ExtensionDesc
+	value interface{}
+	enc   []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, ok := extendable(base)
+	if !ok {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// encodeExtensions encodes any unmarshaled (unencoded) extensions in e.
+func encodeExtensions(e *XXX_InternalExtensions) error {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return nil // fast path
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	return encodeExtensionsMap(m)
+}
+
+// encodeExtensionsMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionsMap(m map[int32]Extension) error {
+	for k, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		et := reflect.TypeOf(e.desc.ExtensionType)
+		props := extensionProperties(e.desc)
+
+		p := NewBuffer(nil)
+		// If e.value has type T, the encoder expects a *struct{ X T }.
+		// Pass a *T with a zero field and hope it all works out.
+		x := reflect.New(et)
+		x.Elem().Set(reflect.ValueOf(e.value))
+		if err := props.enc(p, props, toStructPointer(x)); err != nil {
+			return err
+		}
+		e.enc = p.buf
+		m[k] = e
+	}
+	return nil
+}
+
+func extensionsSize(e *XXX_InternalExtensions) (n int) {
+	m, mu := e.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	return extensionsMapSize(m)
+}
+
+func extensionsMapSize(m map[int32]Extension) (n int) {
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		et := reflect.TypeOf(e.desc.ExtensionType)
+		props := extensionProperties(e.desc)
+
+		// If e.value has type T, the encoder expects a *struct{ X T }.
+		// Pass a *T with a zero field and hope it all works out.
+		x := reflect.New(et)
+		x.Elem().Set(reflect.ValueOf(e.value))
+		n += props.size(props, toStructPointer(x))
+	}
+	return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	// TODO: Check types, field numbers, etc.?
+	epb, ok := extendable(pb)
+	if !ok {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok = extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, errors.New("proto: not an extendable proto")
+	}
+
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return nil, err
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non int32 reflect.value directly
+		// set it as a int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	o := NewBuffer(b)
+
+	t := reflect.TypeOf(extension.ExtensionType)
+
+	props := extensionProperties(extension)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate a "field" to store the pointer/slice itself; the
+	// pointer/slice will be stored here. We pass
+	// the address of this field to props.dec.
+	// This passes a zero field and a *t and lets props.dec
+	// interpret it as a *struct{ x t }.
+	value := reflect.New(t).Elem()
+
+	for {
+		// Discard wire type and field number varint. It isn't needed.
+		if _, err := o.DecodeVarint(); err != nil {
+			return nil, err
+		}
+
+		if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+			return nil, err
+		}
+
+		if o.index >= len(o.buf) {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, errors.New("proto: not an extendable proto")
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb)
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, ok := extendable(pb)
+	if !ok {
+		return errors.New("proto: not an extendable proto")
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return errors.New("proto: bad extension value type")
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
+	return nil
+}
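+
+// Usage sketch: assuming a hypothetical extendable generated message
+// pb.MyMessage and a hypothetical generated extension descriptor pb.E_Greeting
+// whose ExtensionType is *string, SetExtension, HasExtension and GetExtension
+// are typically combined like this:
+//
+//	msg := &pb.MyMessage{}
+//	if err := proto.SetExtension(msg, pb.E_Greeting, proto.String("hello")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Greeting) {
+//		v, err := proto.GetExtension(msg, pb.E_Greeting)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(*v.(*string)) // "hello"
+//	}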
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, ok := extendable(pb)
+	if !ok {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 00000000..1c225504
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,897 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/golang/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders.  Useful for tuning the library itself.
+type Stats struct {
+	Emalloc uint64 // mallocs in encode
+	Dmalloc uint64 // mallocs in decode
+	Encode  uint64 // number of encodes
+	Decode  uint64 // number of decodes
+	Chit    uint64 // number of cache hits
+	Cmiss   uint64 // number of cache misses
+	Size    uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	// pools of basic types to amortize allocation.
+	bools   []bool
+	uint32s []uint32
+	uint64s []uint64
+
+	// extra pools, only used with pointer_reflect.go
+	int32s   []int32
+	int64s   []int64
+	float32s []float32
+	float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
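+
+// Usage sketch of reusing a Buffer with the low-level varint helpers defined
+// in encode.go and decode.go (the values here are illustrative):
+//
+//	w := proto.NewBuffer(nil)
+//	w.EncodeVarint(300)
+//	r := proto.NewBuffer(w.Bytes())
+//	v, err := r.DecodeVarint()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(v) // 300
+//	w.Reset() // ready to encode the next message into the same Buffer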
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
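+
+// Usage sketch, reusing the FOO enum maps from the generated-code example in
+// the package comment (imported there as pb); both JSON representations
+// decode to the same value:
+//
+//	v1, err1 := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "FOO")
+//	v2, err2 := proto.UnmarshalJSONEnum(pb.FOO_value, []byte("17"), "FOO")
+//	// v1 == 17, v2 == 17, err1 == nil, err2 == nil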
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	index := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
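+
+// Usage sketch, reusing the hypothetical generated pb.Test message from the
+// package comment, whose "type" field declares [default=77]:
+//
+//	t := &pb.Test{Label: proto.String("x")}
+//	proto.SetDefaults(t)
+//	fmt.Println(*t.Type) // 77: the unset field was filled with its default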
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to a slice of the fields,
+	// with its scalar fields set to their proto-declared non-zero default values.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field can not have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{
+		vs: vs,
+		// default Less function: textual comparison
+		less: func(a, b reflect.Value) bool {
+			return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+		},
+	}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+	// numeric keys are sorted numerically.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 00000000..fd982dec
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	if ms.find(pb) != nil {
+		return true
+	}
+	return false
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		if err := encodeExtensions(exts); err != nil {
+			return nil, err
+		}
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		if err := encodeExtensionsMap(exts); err != nil {
+			return nil, err
+		}
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+
+	// Sort extension IDs to provide a deterministic encoding.
+	// See also enc_map in encode.go.
+	ids := make([]int, 0, len(m))
+	for id := range m {
+		ids = append(ids, int(id))
+	}
+	sort.Ints(ids)
+
+	ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+	for _, id := range ids {
+		e := m[int32(id)]
+		// Remove the wire type and field number varint, as well as the length varint.
+		msg := skipVarint(skipVarint(e.enc))
+
+		ms.Item = append(ms.Item, &_MessageSet_Item{
+			TypeId:  Int32(int32(id)),
+			Message: msg,
+		})
+	}
+	return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
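+
+// Worked example (the extension number is hypothetical, for illustration only):
+// for a message-set item with type_id 100 and a 3-byte payload, the loop above
+// rebuilds an ordinary length-delimited field:
+//
+//	b := EncodeVarint(uint64(100)<<3 | WireBytes) // 0xa2, 0x06 (field 100, wire type 2)
+//	b = append(b, EncodeVarint(3)...)             // length prefix
+//	b = append(b, payload...)                     // the item's Message bytes
+//
+// and stores that encoding in Extension.enc, the same form a regular extension
+// field would have on the wire.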
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m, _ = exts.extensionsRead()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
+	var b bytes.Buffer
+	b.WriteByte('{')
+
+	// Process the map in key order for deterministic output.
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+	for i, id := range ids {
+		ext := m[id]
+		if i > 0 {
+			b.WriteByte(',')
+		}
+
+		msd, ok := messageSetMap[id]
+		if !ok {
+			// Unknown type; we can't render it, so skip it.
+			continue
+		}
+		fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+		x := ext.value
+		if x == nil {
+			x = reflect.New(msd.t.Elem()).Interface()
+			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+				return nil, err
+			}
+		}
+		d, err := json.Marshal(x)
+		if err != nil {
+			return nil, err
+		}
+		b.Write(d)
+	}
+	b.WriteByte('}')
+	return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
+	// Common-case fast path.
+	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+		return nil
+	}
+
+	// This is fairly tricky, and it's not clear that it is needed.
+	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+	t    reflect.Type // pointer to struct
+	name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+	messageSetMap[fieldNum] = messageSetDesc{
+		t:    reflect.TypeOf(m),
+		name: name,
+	}
+}
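+
+// A minimal sketch of how generated code is expected to use this registry; the
+// type and names below are hypothetical, not part of this package:
+//
+//	func init() {
+//		RegisterMessageSetType((*MyExtension)(nil), 123, "my.pkg.MyExtension")
+//	}
+//
+// The field number keys messageSetMap, which MarshalMessageSetJSON consults to
+// recover the concrete Go type and display name of an extension.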
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 00000000..fb512e2e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,484 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"math"
+	"reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+	v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+	return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+	return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+	return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+	// Special case: an extension map entry with a value of type T
+	// passes a *T to the struct-handling code with a zero field,
+	// expecting that it will be treated as equivalent to *struct{ X T },
+	// which has the same memory layout. We have to handle that case
+	// specially, because reflect will panic if we call FieldByIndex on a
+	// non-struct.
+	if f == nil {
+		return p.v.Elem()
+	}
+
+	return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+	return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+	return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+	return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+	return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+	return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+	return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+	return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+	return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the []*struct field in the struct as a structPointerSlice.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+	return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+	v reflect.Value
+}
+
+func (p structPointerSlice) Len() int                  { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+	p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+	int32Type   = reflect.TypeOf(int32(0))
+	uint32Type  = reflect.TypeOf(uint32(0))
+	float32Type = reflect.TypeOf(float32(0))
+	int64Type   = reflect.TypeOf(int64(0))
+	uint64Type  = reflect.TypeOf(uint64(0))
+	float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+	v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+	return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	t := p.v.Type().Elem()
+	switch t {
+	case int32Type:
+		if len(o.int32s) == 0 {
+			o.int32s = make([]int32, uint32PoolSize)
+		}
+		o.int32s[0] = int32(x)
+		p.v.Set(reflect.ValueOf(&o.int32s[0]))
+		o.int32s = o.int32s[1:]
+		return
+	case uint32Type:
+		if len(o.uint32s) == 0 {
+			o.uint32s = make([]uint32, uint32PoolSize)
+		}
+		o.uint32s[0] = x
+		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+		o.uint32s = o.uint32s[1:]
+		return
+	case float32Type:
+		if len(o.float32s) == 0 {
+			o.float32s = make([]float32, uint32PoolSize)
+		}
+		o.float32s[0] = math.Float32frombits(x)
+		p.v.Set(reflect.ValueOf(&o.float32s[0]))
+		o.float32s = o.float32s[1:]
+		return
+	}
+
+	// must be enum
+	p.v.Set(reflect.New(t))
+	p.v.Elem().SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+	elem := p.v.Elem()
+	switch elem.Kind() {
+	case reflect.Int32:
+		return uint32(elem.Int())
+	case reflect.Uint32:
+		return uint32(elem.Uint())
+	case reflect.Float32:
+		return math.Float32bits(float32(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+	return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+	v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+	switch p.v.Type() {
+	case int32Type:
+		p.v.SetInt(int64(x))
+		return
+	case uint32Type:
+		p.v.SetUint(uint64(x))
+		return
+	case float32Type:
+		p.v.SetFloat(float64(math.Float32frombits(x)))
+		return
+	}
+
+	// must be enum
+	p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+	elem := p.v
+	switch elem.Kind() {
+	case reflect.Int32:
+		return uint32(elem.Int())
+	case reflect.Uint32:
+		return uint32(elem.Uint())
+	case reflect.Float32:
+		return math.Float32bits(float32(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+	return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+	v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+	n, m := p.v.Len(), p.v.Cap()
+	if n < m {
+		p.v.SetLen(n + 1)
+	} else {
+		t := p.v.Type().Elem()
+		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+	}
+	elem := p.v.Index(n)
+	switch elem.Kind() {
+	case reflect.Int32:
+		elem.SetInt(int64(int32(x)))
+	case reflect.Uint32:
+		elem.SetUint(uint64(x))
+	case reflect.Float32:
+		elem.SetFloat(float64(math.Float32frombits(x)))
+	}
+}
+
+func (p word32Slice) Len() int {
+	return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+	elem := p.v.Index(i)
+	switch elem.Kind() {
+	case reflect.Int32:
+		return uint32(elem.Int())
+	case reflect.Uint32:
+		return uint32(elem.Uint())
+	case reflect.Float32:
+		return math.Float32bits(float32(elem.Float()))
+	}
+	panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+	return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+	v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+	t := p.v.Type().Elem()
+	switch t {
+	case int64Type:
+		if len(o.int64s) == 0 {
+			o.int64s = make([]int64, uint64PoolSize)
+		}
+		o.int64s[0] = int64(x)
+		p.v.Set(reflect.ValueOf(&o.int64s[0]))
+		o.int64s = o.int64s[1:]
+		return
+	case uint64Type:
+		if len(o.uint64s) == 0 {
+			o.uint64s = make([]uint64, uint64PoolSize)
+		}
+		o.uint64s[0] = x
+		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+		o.uint64s = o.uint64s[1:]
+		return
+	case float64Type:
+		if len(o.float64s) == 0 {
+			o.float64s = make([]float64, uint64PoolSize)
+		}
+		o.float64s[0] = math.Float64frombits(x)
+		p.v.Set(reflect.ValueOf(&o.float64s[0]))
+		o.float64s = o.float64s[1:]
+		return
+	}
+	panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+	return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+	elem := p.v.Elem()
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+	return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+	v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+	switch p.v.Type() {
+	case int64Type:
+		p.v.SetInt(int64(x))
+		return
+	case uint64Type:
+		p.v.SetUint(x)
+		return
+	case float64Type:
+		p.v.SetFloat(math.Float64frombits(x))
+		return
+	}
+	panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+	elem := p.v
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+	return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+	v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+	n, m := p.v.Len(), p.v.Cap()
+	if n < m {
+		p.v.SetLen(n + 1)
+	} else {
+		t := p.v.Type().Elem()
+		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+	}
+	elem := p.v.Index(n)
+	switch elem.Kind() {
+	case reflect.Int64:
+		elem.SetInt(int64(x))
+	case reflect.Uint64:
+		elem.SetUint(x)
+	case reflect.Float64:
+		elem.SetFloat(math.Float64frombits(x))
+	}
+}
+
+func (p word64Slice) Len() int {
+	return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+	elem := p.v.Index(i)
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+	return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 00000000..6b5567d4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,270 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+//	type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+	return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+	return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
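+
+// Hedged sketch of how these accessors fit together; T and its fields are
+// hypothetical and exist only to illustrate the offset arithmetic:
+//
+//	type T struct {
+//		A int32
+//		B *string
+//	}
+//	sf, _ := reflect.TypeOf(T{}).FieldByName("B")
+//	f := toField(&sf)                           // byte offset of B within T
+//	p := toStructPointer(reflect.ValueOf(&T{})) // raw pointer to the struct
+//	bp := structPointer_String(p, f)            // **string aimed at the B field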
+
+// Extensions returns the address of the XXX_InternalExtensions field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int                  { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+	return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	if len(o.uint32s) == 0 {
+		o.uint32s = make([]uint32, uint32PoolSize)
+	}
+	o.uint32s[0] = x
+	*p = &o.uint32s[0]
+	o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+	return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+	*p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+	return *p
+}
+
+// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
+func (v *word32Slice) Len() int           { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+	if len(o.uint64s) == 0 {
+		o.uint64s = make([]uint64, uint64PoolSize)
+	}
+	o.uint64s[0] = x
+	*p = &o.uint64s[0]
+	o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+	return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+	return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+	*p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+	return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
+func (v *word64Slice) Len() int           { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 00000000..ec2289c0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,872 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for deriving the encoding, decoding and sizing properties of the
+ * fields of generated protocol buffer structs.
+ */
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
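+
+// Illustrative use, assuming nothing beyond the two methods above:
+//
+//	var tm tagMap
+//	tm.put(3, 0)      // small tag: stored in the fastTags slice
+//	tm.put(20000, 1)  // tag >= tagMapFastLimit: falls back to the map
+//	if fi, ok := tm.get(3); ok {
+//		_ = fi // 0
+//	}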
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+	unrecField       field          // field id of the XXX_unrecognized []byte field
+	extendable       bool           // is this an extendable proto
+
+	oneofMarshaler   oneofMarshaler
+	oneofUnmarshaler oneofUnmarshaler
+	oneofSizer       oneofSizer
+	stype            reflect.Type
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+	def_uint64 uint64
+
+	enc           encoder
+	valEnc        valueEncoder // set for bool and numeric types only
+	field         field
+	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+	tagbuf        [8]byte
+	stype         reflect.Type      // set for struct types only
+	sprop         *StructProperties // set for struct types only
+	isMarshaler   bool
+	isUnmarshaler bool
+
+	mtype    reflect.Type // set for map types only
+	mkeyprop *Properties  // set for map types only
+	mvalprop *Properties  // set for map types only
+
+	size    sizer
+	valSize valueSizer // set for bool and numeric types only
+
+	dec    decoder
+	valDec valueDecoder // set for bool and numeric types only
+
+	// If this is a packable field, this will be the decoder for the packed version of the field.
+	packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s = ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeVarint
+		p.valDec = (*Buffer).DecodeVarint
+		p.valSize = sizeVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+		p.valEnc = (*Buffer).EncodeFixed32
+		p.valDec = (*Buffer).DecodeFixed32
+		p.valSize = sizeFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+		p.valEnc = (*Buffer).EncodeFixed64
+		p.valDec = (*Buffer).DecodeFixed64
+		p.valSize = sizeFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeZigzag32
+		p.valDec = (*Buffer).DecodeZigzag32
+		p.valSize = sizeZigzag32
+	case "zigzag64":
+		p.WireType = WireVarint
+		p.valEnc = (*Buffer).EncodeZigzag64
+		p.valDec = (*Buffer).DecodeZigzag64
+		p.valSize = sizeZigzag64
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break
+			}
+		}
+	}
+}
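+
+// A hedged example of what Parse extracts; the tag string is made up for
+// illustration:
+//
+//	var p Properties
+//	p.Parse("bytes,3,opt,name=foo,json=fooBar,def=hello, world")
+//	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 3, p.Optional == true
+//	// p.OrigName == "foo", p.JSONName == "fooBar", p.Default == "hello, world"
+//
+// Note that everything after "def=" is rejoined, since commas inside default
+// values are not escaped.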
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+	fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	p.enc = nil
+	p.dec = nil
+	p.size = nil
+
+	switch t1 := typ; t1.Kind() {
+	default:
+		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+	// proto3 scalar types
+
+	case reflect.Bool:
+		p.enc = (*Buffer).enc_proto3_bool
+		p.dec = (*Buffer).dec_proto3_bool
+		p.size = size_proto3_bool
+	case reflect.Int32:
+		p.enc = (*Buffer).enc_proto3_int32
+		p.dec = (*Buffer).dec_proto3_int32
+		p.size = size_proto3_int32
+	case reflect.Uint32:
+		p.enc = (*Buffer).enc_proto3_uint32
+		p.dec = (*Buffer).dec_proto3_int32 // can reuse
+		p.size = size_proto3_uint32
+	case reflect.Int64, reflect.Uint64:
+		p.enc = (*Buffer).enc_proto3_int64
+		p.dec = (*Buffer).dec_proto3_int64
+		p.size = size_proto3_int64
+	case reflect.Float32:
+		p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+		p.dec = (*Buffer).dec_proto3_int32
+		p.size = size_proto3_uint32
+	case reflect.Float64:
+		p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+		p.dec = (*Buffer).dec_proto3_int64
+		p.size = size_proto3_int64
+	case reflect.String:
+		p.enc = (*Buffer).enc_proto3_string
+		p.dec = (*Buffer).dec_proto3_string
+		p.size = size_proto3_string
+
+	case reflect.Ptr:
+		switch t2 := t1.Elem(); t2.Kind() {
+		default:
+			fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+			break
+		case reflect.Bool:
+			p.enc = (*Buffer).enc_bool
+			p.dec = (*Buffer).dec_bool
+			p.size = size_bool
+		case reflect.Int32:
+			p.enc = (*Buffer).enc_int32
+			p.dec = (*Buffer).dec_int32
+			p.size = size_int32
+		case reflect.Uint32:
+			p.enc = (*Buffer).enc_uint32
+			p.dec = (*Buffer).dec_int32 // can reuse
+			p.size = size_uint32
+		case reflect.Int64, reflect.Uint64:
+			p.enc = (*Buffer).enc_int64
+			p.dec = (*Buffer).dec_int64
+			p.size = size_int64
+		case reflect.Float32:
+			p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+			p.dec = (*Buffer).dec_int32
+			p.size = size_uint32
+		case reflect.Float64:
+			p.enc = (*Buffer).enc_int64 // can just treat them as bits
+			p.dec = (*Buffer).dec_int64
+			p.size = size_int64
+		case reflect.String:
+			p.enc = (*Buffer).enc_string
+			p.dec = (*Buffer).dec_string
+			p.size = size_string
+		case reflect.Struct:
+			p.stype = t1.Elem()
+			p.isMarshaler = isMarshaler(t1)
+			p.isUnmarshaler = isUnmarshaler(t1)
+			if p.Wire == "bytes" {
+				p.enc = (*Buffer).enc_struct_message
+				p.dec = (*Buffer).dec_struct_message
+				p.size = size_struct_message
+			} else {
+				p.enc = (*Buffer).enc_struct_group
+				p.dec = (*Buffer).dec_struct_group
+				p.size = size_struct_group
+			}
+		}
+
+	case reflect.Slice:
+		switch t2 := t1.Elem(); t2.Kind() {
+		default:
+			logNoSliceEnc(t1, t2)
+			break
+		case reflect.Bool:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_bool
+				p.size = size_slice_packed_bool
+			} else {
+				p.enc = (*Buffer).enc_slice_bool
+				p.size = size_slice_bool
+			}
+			p.dec = (*Buffer).dec_slice_bool
+			p.packedDec = (*Buffer).dec_slice_packed_bool
+		case reflect.Int32:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_int32
+				p.size = size_slice_packed_int32
+			} else {
+				p.enc = (*Buffer).enc_slice_int32
+				p.size = size_slice_int32
+			}
+			p.dec = (*Buffer).dec_slice_int32
+			p.packedDec = (*Buffer).dec_slice_packed_int32
+		case reflect.Uint32:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_uint32
+				p.size = size_slice_packed_uint32
+			} else {
+				p.enc = (*Buffer).enc_slice_uint32
+				p.size = size_slice_uint32
+			}
+			p.dec = (*Buffer).dec_slice_int32
+			p.packedDec = (*Buffer).dec_slice_packed_int32
+		case reflect.Int64, reflect.Uint64:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_int64
+				p.size = size_slice_packed_int64
+			} else {
+				p.enc = (*Buffer).enc_slice_int64
+				p.size = size_slice_int64
+			}
+			p.dec = (*Buffer).dec_slice_int64
+			p.packedDec = (*Buffer).dec_slice_packed_int64
+		case reflect.Uint8:
+			p.dec = (*Buffer).dec_slice_byte
+			if p.proto3 {
+				p.enc = (*Buffer).enc_proto3_slice_byte
+				p.size = size_proto3_slice_byte
+			} else {
+				p.enc = (*Buffer).enc_slice_byte
+				p.size = size_slice_byte
+			}
+		case reflect.Float32, reflect.Float64:
+			switch t2.Bits() {
+			case 32:
+				// can just treat them as bits
+				if p.Packed {
+					p.enc = (*Buffer).enc_slice_packed_uint32
+					p.size = size_slice_packed_uint32
+				} else {
+					p.enc = (*Buffer).enc_slice_uint32
+					p.size = size_slice_uint32
+				}
+				p.dec = (*Buffer).dec_slice_int32
+				p.packedDec = (*Buffer).dec_slice_packed_int32
+			case 64:
+				// can just treat them as bits
+				if p.Packed {
+					p.enc = (*Buffer).enc_slice_packed_int64
+					p.size = size_slice_packed_int64
+				} else {
+					p.enc = (*Buffer).enc_slice_int64
+					p.size = size_slice_int64
+				}
+				p.dec = (*Buffer).dec_slice_int64
+				p.packedDec = (*Buffer).dec_slice_packed_int64
+			default:
+				logNoSliceEnc(t1, t2)
+				break
+			}
+		case reflect.String:
+			p.enc = (*Buffer).enc_slice_string
+			p.dec = (*Buffer).dec_slice_string
+			p.size = size_slice_string
+		case reflect.Ptr:
+			switch t3 := t2.Elem(); t3.Kind() {
+			default:
+				fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+				break
+			case reflect.Struct:
+				p.stype = t2.Elem()
+				p.isMarshaler = isMarshaler(t2)
+				p.isUnmarshaler = isUnmarshaler(t2)
+				if p.Wire == "bytes" {
+					p.enc = (*Buffer).enc_slice_struct_message
+					p.dec = (*Buffer).dec_slice_struct_message
+					p.size = size_slice_struct_message
+				} else {
+					p.enc = (*Buffer).enc_slice_struct_group
+					p.dec = (*Buffer).dec_slice_struct_group
+					p.size = size_slice_struct_group
+				}
+			}
+		case reflect.Slice:
+			switch t2.Elem().Kind() {
+			default:
+				fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+				break
+			case reflect.Uint8:
+				p.enc = (*Buffer).enc_slice_slice_byte
+				p.dec = (*Buffer).dec_slice_slice_byte
+				p.size = size_slice_slice_byte
+			}
+		}
+
+	case reflect.Map:
+		p.enc = (*Buffer).enc_new_map
+		p.dec = (*Buffer).dec_new_map
+		p.size = size_new_map
+
+		p.mtype = t1
+		p.mkeyprop = &Properties{}
+		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.mvalprop = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	// precalculate tag code
+	wire := p.WireType
+	if p.Packed {
+		wire = WireBytes
+	}
+	x := uint32(p.Tag)<<3 | uint32(wire)
+	i := 0
+	for i = 0; x > 127; i++ {
+		p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	p.tagbuf[i] = uint8(x)
+	p.tagcode = p.tagbuf[0 : i+1]
+
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
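+
+// Worked example for the tag-code precalculation above (values chosen only for
+// illustration): a packed field with Tag 5 uses WireBytes, so
+// x = 5<<3|2 = 42, which fits in one byte and gives tagcode = {0x2a}.
+// A field with Tag 100 and WireVarint gives x = 800, encoded as {0xa0, 0x06}.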
+
+var (
+	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+	// We're checking for (likely) pointer-receiver methods
+	// so if t is not a pointer, something is very wrong.
+	// The calls above only invoke isMarshaler on pointer types.
+	if t.Kind() != reflect.Ptr {
+		panic("proto: misuse of isMarshaler")
+	}
+	return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+	// We're checking for (likely) pointer-receiver methods
+	// so if t is not a pointer, something is very wrong.
+	// The calls above only invoke isUnmarshaler on pointer types.
+	if t.Kind() != reflect.Ptr {
+		panic("proto: misuse of isUnmarshaler")
+	}
+	return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if f != nil {
+		p.field = toField(f)
+	}
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
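+
+// Minimal usage sketch; MyMsg stands in for a hypothetical generated message type:
+//
+//	sprop := GetProperties(reflect.TypeOf(MyMsg{}))
+//	for _, p := range sprop.Prop {
+//		_ = p.OrigName // one *Properties per struct field, in declaration order
+//	}
+//
+// Note that GetProperties takes the struct type itself, not a pointer to it.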
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		if collectStats {
+			stats.Chit++
+		}
+		return prop
+	}
+	if collectStats {
+		stats.Cmiss++
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||
+		reflect.PtrTo(t).Implements(extendableProtoV1Type)
+	prop.unrecField = invalidField
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		if f.Name == "XXX_InternalExtensions" { // special case
+			p.enc = (*Buffer).enc_exts
+			p.dec = nil // not needed
+			p.size = size_exts
+		} else if f.Name == "XXX_extensions" { // special case
+			p.enc = (*Buffer).enc_map
+			p.dec = nil // not needed
+			p.size = size_map
+		} else if f.Name == "XXX_unrecognized" { // special case
+			prop.unrecField = toField(&f)
+		}
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" {
+			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	type oneofMessage interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+		var oots []interface{}
+		prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+		prop.stype = t
+
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+	if len(x) != 1 {
+		fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+		return nil
+	}
+	prop := GetProperties(t)
+	return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+	if pb == nil {
+		err = ErrNil
+		return
+	}
+	// get the reflect type of the pointer to the struct.
+	t = reflect.TypeOf(pb)
+	// get the address of the struct.
+	value := reflect.ValueOf(pb)
+	b = toStructPointer(value)
+	return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
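+
+// Sketch of the generated-code call sequence; the enum name and values are
+// hypothetical:
+//
+//	func init() {
+//		RegisterEnum("example.Color", map[int32]string{0: "RED"}, map[string]int32{"RED": 0})
+//	}
+//
+//	// later, e.g. while parsing the text format:
+//	if v, ok := EnumValueMap("example.Color")["RED"]; ok {
+//		_ = v // 0
+//	}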
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypes    = make(map[string]reflect.Type)
+	revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypes[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
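+
+// Sketch of the type-registry round trip; MyMsg is a hypothetical generated type:
+//
+//	func init() { RegisterType((*MyMsg)(nil), "my.pkg.MyMsg") }
+//
+//	name := MessageName((*MyMsg)(nil)) // "my.pkg.MyMsg"
+//	typ := MessageType(name)           // reflect.Type of *MyMsg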
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 00000000..965876bf
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,854 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	gtNewline       = []byte(">\n")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+	Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z./_], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		if b, ok := fv.Interface().(raw); ok {
+			if err := writeRaw(w, b.Bytes()); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// Enums have a String method, so writeAny will work fine.
+		if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv.Addr()
+	if _, ok := extendable(pv.Interface()); ok {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+	if err := w.WriteByte('<'); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	w.indent()
+	if err := writeUnknownStruct(w, b); err != nil {
+		return err
+	}
+	w.unindent()
+	if err := w.WriteByte('>'); err != nil {
+		return err
+	}
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else if err := tm.writeStruct(w, v); err != nil {
+			return err
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, err := fmt.Fprintf(w, "/* %v */\n", err)
+			return err
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, err := w.Write(endBraceNewline); err != nil {
+				return err
+			}
+			continue
+		}
+		if _, err := fmt.Fprint(w, tag); err != nil {
+			return err
+		}
+		if wire != WireStartGroup {
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err = w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	ep, _ := extendable(pv.Interface())
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(ep, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
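A hedged usage sketch for the marshaler above; pb.Example, its Name field, and the import path are hypothetical generated artifacts, not part of this patch.

	package main

	import (
		"os"

		"github.com/golang/protobuf/proto"

		pb "example.com/project/example" // hypothetical generated package
	)

	func main() {
		msg := &pb.Example{Name: proto.String("demo")} // hypothetical message
		_ = proto.MarshalText(os.Stdout, msg)          // multi-line text format
		println(proto.CompactTextString(msg))          // one-line variant

		// Expanding google.protobuf.Any values of known types is opt-in.
		tm := proto.TextMarshaler{ExpandAny: true}
		_ = tm.Marshal(os.Stdout, msg)
	}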
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 00000000..61f83c1e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,895 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error string emitted when an Any message is unpacked and its fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+	errBadHex  = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		base := 8
+		ss := s[:2]
+		s = s[2:]
+		if r == 'x' || r == 'X' {
+			base = 16
+		} else {
+			ss = string(r) + ss
+		}
+		i, err := strconv.ParseUint(ss, base, 8)
+		if err != nil {
+			return "", "", err
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'u', 'U':
+		n := 4
+		if r == 'U' {
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+		}
+
+		bs := make([]byte, n/2)
+		for i := 0; i < n; i += 2 {
+			a, ok1 := unhex(s[i])
+			b, ok2 := unhex(s[i+1])
+			if !ok1 || !ok2 {
+				return "", "", errBadHex
+			}
+			bs[i/2] = a<<4 | b
+		}
+		s = s[n:]
+		return string(bs), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+	switch {
+	case '0' <= b && b <= '9':
+		return b - '0', true
+	case 'a' <= b && b <= 'f':
+		return b - 'a' + 10, true
+	case 'A' <= b && b <= 'F':
+		return b - 'A' + 10, true
+	}
+	return 0, false
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.mkeyprop); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.mvalprop); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If the extension name or type URL is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return p.errorf("expected ']' or ',', found %q", tok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		err := um.UnmarshalText([]byte(s))
+		return err
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+		return pe
+	}
+	return nil
+}
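And the inverse direction, again only as an illustrative sketch; pb.Example and its name/id fields are placeholders for a generated message type.

	package main

	import (
		"log"

		"github.com/golang/protobuf/proto"

		pb "example.com/project/example" // hypothetical generated package
	)

	func main() {
		in := `name: "demo" id: 7` // field names are placeholders
		var msg pb.Example
		if err := proto.UnmarshalText(in, &msg); err != nil {
			log.Fatalf("text parse failed: %v", err)
		}
	}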
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
new file mode 100644
index 00000000..f143ed6a
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context.go
@@ -0,0 +1,156 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package context defines the Context type, which carries deadlines,
+// cancelation signals, and other request-scoped values across API boundaries
+// and between processes.
+//
+// Incoming requests to a server should create a Context, and outgoing calls to
+// servers should accept a Context. The chain of function calls between must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// 	func DoSomething(ctx context.Context, arg Arg) error {
+// 		// ... use ctx ...
+// 	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+import "time"
+
+// A Context carries a deadline, a cancelation signal, and other values across
+// API boundaries.
+//
+// Context's methods may be called by multiple goroutines simultaneously.
+type Context interface {
+	// Deadline returns the time when work done on behalf of this context
+	// should be canceled. Deadline returns ok==false when no deadline is
+	// set. Successive calls to Deadline return the same results.
+	Deadline() (deadline time.Time, ok bool)
+
+	// Done returns a channel that's closed when work done on behalf of this
+	// context should be canceled. Done may return nil if this context can
+	// never be canceled. Successive calls to Done return the same value.
+	//
+	// WithCancel arranges for Done to be closed when cancel is called;
+	// WithDeadline arranges for Done to be closed when the deadline
+	// expires; WithTimeout arranges for Done to be closed when the timeout
+	// elapses.
+	//
+	// Done is provided for use in select statements:
+	//
+	//  // Stream generates values with DoSomething and sends them to out
+	//  // until DoSomething returns an error or ctx.Done is closed.
+	//  func Stream(ctx context.Context, out chan<- Value) error {
+	//  	for {
+	//  		v, err := DoSomething(ctx)
+	//  		if err != nil {
+	//  			return err
+	//  		}
+	//  		select {
+	//  		case <-ctx.Done():
+	//  			return ctx.Err()
+	//  		case out <- v:
+	//  		}
+	//  	}
+	//  }
+	//
+	// See http://blog.golang.org/pipelines for more examples of how to use
+	// a Done channel for cancelation.
+	Done() <-chan struct{}
+
+	// Err returns a non-nil error value after Done is closed. Err returns
+	// Canceled if the context was canceled or DeadlineExceeded if the
+	// context's deadline passed. No other values for Err are defined.
+	// After Done is closed, successive calls to Err return the same value.
+	Err() error
+
+	// Value returns the value associated with this context for key, or nil
+	// if no value is associated with key. Successive calls to Value with
+	// the same key return the same result.
+	//
+	// Use context values only for request-scoped data that transits
+	// processes and API boundaries, not for passing optional parameters to
+	// functions.
+	//
+	// A key identifies a specific value in a Context. Functions that wish
+	// to store values in Context typically allocate a key in a global
+	// variable then use that key as the argument to context.WithValue and
+	// Context.Value. A key can be any type that supports equality;
+	// packages should define keys as an unexported type to avoid
+	// collisions.
+	//
+	// Packages that define a Context key should provide type-safe accessors
+	// for the values stored using that key:
+	//
+	// 	// Package user defines a User type that's stored in Contexts.
+	// 	package user
+	//
+	// 	import "golang.org/x/net/context"
+	//
+	// 	// User is the type of value stored in the Contexts.
+	// 	type User struct {...}
+	//
+	// 	// key is an unexported type for keys defined in this package.
+	// 	// This prevents collisions with keys defined in other packages.
+	// 	type key int
+	//
+	// 	// userKey is the key for user.User values in Contexts. It is
+	// 	// unexported; clients use user.NewContext and user.FromContext
+	// 	// instead of using this key directly.
+	// 	var userKey key = 0
+	//
+	// 	// NewContext returns a new Context that carries value u.
+	// 	func NewContext(ctx context.Context, u *User) context.Context {
+	// 		return context.WithValue(ctx, userKey, u)
+	// 	}
+	//
+	// 	// FromContext returns the User value stored in ctx, if any.
+	// 	func FromContext(ctx context.Context) (*User, bool) {
+	// 		u, ok := ctx.Value(userKey).(*User)
+	// 		return u, ok
+	// 	}
+	Value(key interface{}) interface{}
+}
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter).  TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
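A minimal sketch of the calling convention described in the package comment above; the function and its "work" are illustrative only.

	package example

	import "golang.org/x/net/context"

	// fetchValue takes ctx as its first parameter and consults Done/Err,
	// as the guidelines above recommend; the returned value is a placeholder.
	func fetchValue(ctx context.Context, key string) (string, error) {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		default:
			return "value-for-" + key, nil
		}
	}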
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
new file mode 100644
index 00000000..606cf1f9
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
@@ -0,0 +1,74 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+// Do sends an HTTP request with the provided http.Client and returns
+// an HTTP response.
+//
+// If the client is nil, http.DefaultClient is used.
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+	resp, err := client.Do(req.WithContext(ctx))
+	// If we got an error, and the context has been canceled,
+	// the context's error is probably more useful.
+	if err != nil {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+		default:
+		}
+	}
+	return resp, err
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
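An illustrative call against the helpers above, assuming a request deadline is wanted; the URL is a placeholder, and a nil client falls back to http.DefaultClient.

	package main

	import (
		"log"
		"time"

		"golang.org/x/net/context"
		"golang.org/x/net/context/ctxhttp"
	)

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		resp, err := ctxhttp.Get(ctx, nil, "https://example.com/healthz")
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()
	}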
diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
new file mode 100644
index 00000000..926870cc
--- /dev/null
+++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp_pre17.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package ctxhttp // import "golang.org/x/net/context/ctxhttp"
+
+import (
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/context"
+)
+
+func nop() {}
+
+var (
+	testHookContextDoneBeforeHeaders = nop
+	testHookDoReturned               = nop
+	testHookDidBodyClose             = nop
+)
+
+// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
+// If the client is nil, http.DefaultClient is used.
+// If the context is canceled or times out, ctx.Err() will be returned.
+func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
+	if client == nil {
+		client = http.DefaultClient
+	}
+
+	// TODO(djd): Respect any existing value of req.Cancel.
+	cancel := make(chan struct{})
+	req.Cancel = cancel
+
+	type responseAndError struct {
+		resp *http.Response
+		err  error
+	}
+	result := make(chan responseAndError, 1)
+
+	// Make local copies of test hooks closed over by goroutines below.
+	// Prevents data races in tests.
+	testHookDoReturned := testHookDoReturned
+	testHookDidBodyClose := testHookDidBodyClose
+
+	go func() {
+		resp, err := client.Do(req)
+		testHookDoReturned()
+		result <- responseAndError{resp, err}
+	}()
+
+	var resp *http.Response
+
+	select {
+	case <-ctx.Done():
+		testHookContextDoneBeforeHeaders()
+		close(cancel)
+		// Clean up after the goroutine calling client.Do:
+		go func() {
+			if r := <-result; r.resp != nil {
+				testHookDidBodyClose()
+				r.resp.Body.Close()
+			}
+		}()
+		return nil, ctx.Err()
+	case r := <-result:
+		var err error
+		resp, err = r.resp, r.err
+		if err != nil {
+			return resp, err
+		}
+	}
+
+	c := make(chan struct{})
+	go func() {
+		select {
+		case <-ctx.Done():
+			close(cancel)
+		case <-c:
+			// The response's Body is closed.
+		}
+	}()
+	resp.Body = &notifyingReader{resp.Body, c}
+
+	return resp, nil
+}
+
+// Get issues a GET request via the Do function.
+func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Head issues a HEAD request via the Do function.
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
+	req, err := http.NewRequest("HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return Do(ctx, client, req)
+}
+
+// Post issues a POST request via the Do function.
+func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
+	req, err := http.NewRequest("POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", bodyType)
+	return Do(ctx, client, req)
+}
+
+// PostForm issues a POST request via the Do function.
+func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
+	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
+}
+
+// notifyingReader is an io.ReadCloser that closes the notify channel after
+// Close is called or a Read fails on the underlying ReadCloser.
+type notifyingReader struct {
+	io.ReadCloser
+	notify chan<- struct{}
+}
+
+func (r *notifyingReader) Read(p []byte) (int, error) {
+	n, err := r.ReadCloser.Read(p)
+	if err != nil && r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return n, err
+}
+
+func (r *notifyingReader) Close() error {
+	err := r.ReadCloser.Close()
+	if r.notify != nil {
+		close(r.notify)
+		r.notify = nil
+	}
+	return err
+}
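
For reference, a minimal sketch of how the ctxhttp helpers above are typically consumed; the URL is a placeholder, and passing a nil client falls back to http.DefaultClient as documented:

	package main

	import (
		"fmt"
		"io/ioutil"
		"log"
		"time"

		"golang.org/x/net/context"
		"golang.org/x/net/context/ctxhttp"
	)

	func main() {
		// Abandon the request if it takes longer than five seconds.
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// A nil client means http.DefaultClient is used.
		resp, err := ctxhttp.Get(ctx, nil, "https://example.com/")
		if err != nil {
			log.Fatal(err) // reflects cancellation if the deadline passed
		}
		defer resp.Body.Close()

		body, _ := ioutil.ReadAll(resp.Body)
		fmt.Printf("fetched %d bytes\n", len(body))
	}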
diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go
new file mode 100644
index 00000000..d20f52b7
--- /dev/null
+++ b/vendor/golang.org/x/net/context/go17.go
@@ -0,0 +1,72 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package context
+
+import (
+	"context" // standard library's context, as of Go 1.7
+	"time"
+)
+
+var (
+	todo       = context.TODO()
+	background = context.Background()
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = context.Canceled
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = context.DeadlineExceeded
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	ctx, f := context.WithCancel(parent)
+	return ctx, CancelFunc(f)
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	ctx, f := context.WithDeadline(parent, deadline)
+	return ctx, CancelFunc(f)
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return context.WithValue(parent, key, val)
+}
diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go
new file mode 100644
index 00000000..0f35592d
--- /dev/null
+++ b/vendor/golang.org/x/net/context/pre_go17.go
@@ -0,0 +1,300 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+)
+
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+	return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+	return nil
+}
+
+func (*emptyCtx) Err() error {
+	return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+	return nil
+}
+
+func (e *emptyCtx) String() string {
+	switch e {
+	case background:
+		return "context.Background"
+	case todo:
+		return "context.TODO"
+	}
+	return "unknown empty Context"
+}
+
+var (
+	background = new(emptyCtx)
+	todo       = new(emptyCtx)
+)
+
+// Canceled is the error returned by Context.Err when the context is canceled.
+var Canceled = errors.New("context canceled")
+
+// DeadlineExceeded is the error returned by Context.Err when the context's
+// deadline passes.
+var DeadlineExceeded = errors.New("context deadline exceeded")
+
+// WithCancel returns a copy of parent with a new Done channel. The returned
+// context's Done channel is closed when the returned cancel function is called
+// or when the parent context's Done channel is closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
+	c := newCancelCtx(parent)
+	propagateCancel(parent, c)
+	return c, func() { c.cancel(true, Canceled) }
+}
+
+// newCancelCtx returns an initialized cancelCtx.
+func newCancelCtx(parent Context) *cancelCtx {
+	return &cancelCtx{
+		Context: parent,
+		done:    make(chan struct{}),
+	}
+}
+
+// propagateCancel arranges for child to be canceled when parent is.
+func propagateCancel(parent Context, child canceler) {
+	if parent.Done() == nil {
+		return // parent is never canceled
+	}
+	if p, ok := parentCancelCtx(parent); ok {
+		p.mu.Lock()
+		if p.err != nil {
+			// parent has already been canceled
+			child.cancel(false, p.err)
+		} else {
+			if p.children == nil {
+				p.children = make(map[canceler]bool)
+			}
+			p.children[child] = true
+		}
+		p.mu.Unlock()
+	} else {
+		go func() {
+			select {
+			case <-parent.Done():
+				child.cancel(false, parent.Err())
+			case <-child.Done():
+			}
+		}()
+	}
+}
+
+// parentCancelCtx follows a chain of parent references until it finds a
+// *cancelCtx. This function understands how each of the concrete types in this
+// package represents its parent.
+func parentCancelCtx(parent Context) (*cancelCtx, bool) {
+	for {
+		switch c := parent.(type) {
+		case *cancelCtx:
+			return c, true
+		case *timerCtx:
+			return c.cancelCtx, true
+		case *valueCtx:
+			parent = c.Context
+		default:
+			return nil, false
+		}
+	}
+}
+
+// removeChild removes a context from its parent.
+func removeChild(parent Context, child canceler) {
+	p, ok := parentCancelCtx(parent)
+	if !ok {
+		return
+	}
+	p.mu.Lock()
+	if p.children != nil {
+		delete(p.children, child)
+	}
+	p.mu.Unlock()
+}
+
+// A canceler is a context type that can be canceled directly. The
+// implementations are *cancelCtx and *timerCtx.
+type canceler interface {
+	cancel(removeFromParent bool, err error)
+	Done() <-chan struct{}
+}
+
+// A cancelCtx can be canceled. When canceled, it also cancels any children
+// that implement canceler.
+type cancelCtx struct {
+	Context
+
+	done chan struct{} // closed by the first cancel call.
+
+	mu       sync.Mutex
+	children map[canceler]bool // set to nil by the first cancel call
+	err      error             // set to non-nil by the first cancel call
+}
+
+func (c *cancelCtx) Done() <-chan struct{} {
+	return c.done
+}
+
+func (c *cancelCtx) Err() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	return c.err
+}
+
+func (c *cancelCtx) String() string {
+	return fmt.Sprintf("%v.WithCancel", c.Context)
+}
+
+// cancel closes c.done, cancels each of c's children, and, if
+// removeFromParent is true, removes c from its parent's children.
+func (c *cancelCtx) cancel(removeFromParent bool, err error) {
+	if err == nil {
+		panic("context: internal error: missing cancel error")
+	}
+	c.mu.Lock()
+	if c.err != nil {
+		c.mu.Unlock()
+		return // already canceled
+	}
+	c.err = err
+	close(c.done)
+	for child := range c.children {
+		// NOTE: acquiring the child's lock while holding parent's lock.
+		child.cancel(false, err)
+	}
+	c.children = nil
+	c.mu.Unlock()
+
+	if removeFromParent {
+		removeChild(c.Context, c)
+	}
+}
+
+// WithDeadline returns a copy of the parent context with the deadline adjusted
+// to be no later than d. If the parent's deadline is already earlier than d,
+// WithDeadline(parent, d) is semantically equivalent to parent. The returned
+// context's Done channel is closed when the deadline expires, when the returned
+// cancel function is called, or when the parent context's Done channel is
+// closed, whichever happens first.
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete.
+func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
+	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
+		// The current deadline is already sooner than the new one.
+		return WithCancel(parent)
+	}
+	c := &timerCtx{
+		cancelCtx: newCancelCtx(parent),
+		deadline:  deadline,
+	}
+	propagateCancel(parent, c)
+	d := deadline.Sub(time.Now())
+	if d <= 0 {
+		c.cancel(true, DeadlineExceeded) // deadline has already passed
+		return c, func() { c.cancel(true, Canceled) }
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err == nil {
+		c.timer = time.AfterFunc(d, func() {
+			c.cancel(true, DeadlineExceeded)
+		})
+	}
+	return c, func() { c.cancel(true, Canceled) }
+}
+
+// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
+// implement Done and Err. It implements cancel by stopping its timer then
+// delegating to cancelCtx.cancel.
+type timerCtx struct {
+	*cancelCtx
+	timer *time.Timer // Under cancelCtx.mu.
+
+	deadline time.Time
+}
+
+func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
+	return c.deadline, true
+}
+
+func (c *timerCtx) String() string {
+	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
+}
+
+func (c *timerCtx) cancel(removeFromParent bool, err error) {
+	c.cancelCtx.cancel(false, err)
+	if removeFromParent {
+		// Remove this timerCtx from its parent cancelCtx's children.
+		removeChild(c.cancelCtx.Context, c)
+	}
+	c.mu.Lock()
+	if c.timer != nil {
+		c.timer.Stop()
+		c.timer = nil
+	}
+	c.mu.Unlock()
+}
+
+// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
+//
+// Canceling this context releases resources associated with it, so code should
+// call cancel as soon as the operations running in this Context complete:
+//
+// 	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
+// 		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
+// 		defer cancel()  // releases resources if slowOperation completes before timeout elapses
+// 		return slowOperation(ctx)
+// 	}
+func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
+	return WithDeadline(parent, time.Now().Add(timeout))
+}
+
+// WithValue returns a copy of parent in which the value associated with key is
+// val.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+func WithValue(parent Context, key interface{}, val interface{}) Context {
+	return &valueCtx{parent, key, val}
+}
+
+// A valueCtx carries a key-value pair. It implements Value for that key and
+// delegates all other calls to the embedded Context.
+type valueCtx struct {
+	Context
+	key, val interface{}
+}
+
+func (c *valueCtx) String() string {
+	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
+}
+
+func (c *valueCtx) Value(key interface{}) interface{} {
+	if c.key == key {
+		return c.val
+	}
+	return c.Context.Value(key)
+}
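
A brief usage sketch for the context API vendored above; the value key and durations are illustrative only:

	package main

	import (
		"fmt"
		"time"

		"golang.org/x/net/context"
	)

	func main() {
		// Derive a cancellable context and attach a request-scoped value.
		ctx, cancel := context.WithCancel(context.Background())
		ctx = context.WithValue(ctx, "request-id", 42) // illustrative key

		go func() {
			select {
			case <-time.After(time.Second):
				fmt.Println("work finished for request", ctx.Value("request-id"))
			case <-ctx.Done():
				fmt.Println("gave up:", ctx.Err()) // prints "context canceled"
			}
		}()

		cancel()                          // closes ctx.Done(), releasing the goroutine
		time.Sleep(50 * time.Millisecond) // give the goroutine time to observe it
	}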
diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 00000000..15167cd7
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 00000000..46aa2b12
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,31 @@
+# Contributing to Go
+
+Go is an open source project.
+
+It is the work of hundreds of contributors. We appreciate your help!
+
+
+## Filing issues
+
+When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
+The gophers there will answer or ask you to file an issue if you've tripped over a bug.
+
+## Contributing code
+
+Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
+before sending patches.
+
+**We do not accept GitHub pull requests**
+(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).
+
+Unless otherwise noted, the Go source files are distributed under
+the BSD-style license found in the LICENSE file.
+
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 00000000..1c4577e9
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE
new file mode 100644
index 00000000..d02f24fd
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md
new file mode 100644
index 00000000..b0ddf3c1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/README.md
@@ -0,0 +1,74 @@
+# OAuth2 for Go
+
+[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2)
+[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2)
+
+The oauth2 package contains a client implementation for the OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
+## App Engine
+
+In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
+of the [`context.Context`](https://golang.org/x/net/context#Context) type from
+the `golang.org/x/net/context` package.
+
+This means it's no longer possible to use the "Classic App Engine"
+`appengine.Context` type with the `oauth2` package. (You're using
+Classic App Engine if you import the package `"appengine"`.)
+
+To work around this, you may use the new `"google.golang.org/appengine"`
+package. This package has almost the same API as the `"appengine"` package,
+but it can be fetched with `go get` and used on "Managed VMs" as well as
+Classic App Engine.
+
+See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
+for information on updating your app.
+
+If you don't want to update your entire app to use the new App Engine packages,
+you may use both sets of packages in parallel, using only the new packages
+with the `oauth2` package.
+
+	import (
+		"golang.org/x/net/context"
+		"golang.org/x/oauth2"
+		"golang.org/x/oauth2/google"
+		newappengine "google.golang.org/appengine"
+		newurlfetch "google.golang.org/appengine/urlfetch"
+
+		"appengine"
+	)
+
+	func handler(w http.ResponseWriter, r *http.Request) {
+		var c appengine.Context = appengine.NewContext(r)
+		c.Infof("Logging a message with the old package")
+
+		var ctx context.Context = newappengine.NewContext(r)
+		client := &http.Client{
+			Transport: &oauth2.Transport{
+				Source: google.AppEngineTokenSource(ctx, "scope"),
+				Base:   &newurlfetch.Transport{Context: ctx},
+			},
+		}
+		client.Get("...")
+	}
+
+## Contributing
+
+We appreciate your help!
+
+To contribute, please read the contribution guidelines:
+	https://golang.org/doc/contribute.html
+
+Note that the Go project does not use GitHub pull requests but
+uses Gerrit for code reviews. See the contribution guide for details.
diff --git a/vendor/golang.org/x/oauth2/client_appengine.go b/vendor/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 00000000..8962c49d
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+	"google.golang.org/appengine/urlfetch"
+)
+
+func init() {
+	internal.RegisterContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
+	return urlfetch.Client(ctx), nil
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengine.go b/vendor/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 00000000..50d918b8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+)
+
+// appengineFlex is set at init time by appengineflex_hook.go. If true, we are on App Engine Flex.
+var appengineFlex bool
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineTokenFunc func(c context.Context, scopes ...string) (token string, expiry time.Time, err error)
+
+// Set at init time by appengine_hook.go. If nil, we're not on App Engine.
+var appengineAppIDFunc func(c context.Context) string
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// The provided context must have come from appengine.NewContext.
+func AppEngineTokenSource(ctx context.Context, scope ...string) oauth2.TokenSource {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+	scopes := append([]string{}, scope...)
+	sort.Strings(scopes)
+	return &appEngineTokenSource{
+		ctx:    ctx,
+		scopes: scopes,
+		key:    strings.Join(scopes, " "),
+	}
+}
+
+// aeTokens caches fetched tokens so they can be reused until they expire.
+var (
+	aeTokensMu sync.Mutex
+	aeTokens   = make(map[string]*tokenLock) // key is space-separated scopes
+)
+
+type tokenLock struct {
+	mu sync.Mutex // guards t; held while fetching or updating t
+	t  *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+	ctx    context.Context
+	scopes []string
+	key    string // to aeTokens map; space-separated scopes
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+	if appengineTokenFunc == nil {
+		panic("google: AppEngineTokenSource can only be used on App Engine.")
+	}
+
+	aeTokensMu.Lock()
+	tok, ok := aeTokens[ts.key]
+	if !ok {
+		tok = &tokenLock{}
+		aeTokens[ts.key] = tok
+	}
+	aeTokensMu.Unlock()
+
+	tok.mu.Lock()
+	defer tok.mu.Unlock()
+	if tok.t.Valid() {
+		return tok.t, nil
+	}
+	access, exp, err := appengineTokenFunc(ts.ctx, ts.scopes...)
+	if err != nil {
+		return nil, err
+	}
+	tok.t = &oauth2.Token{
+		AccessToken: access,
+		Expiry:      exp,
+	}
+	return tok.t, nil
+}
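
A hedged sketch of how AppEngineTokenSource is commonly wired into an App Engine handler; the scope, project name, and request URL are placeholders, and the context must come from appengine.NewContext as noted above:

	package app

	import (
		"net/http"

		"golang.org/x/oauth2"
		"golang.org/x/oauth2/google"
		"google.golang.org/appengine"
		"google.golang.org/appengine/urlfetch"
	)

	func handler(w http.ResponseWriter, r *http.Request) {
		ctx := appengine.NewContext(r)

		// HTTP client authenticated as the app's service account.
		client := &http.Client{
			Transport: &oauth2.Transport{
				Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/devstorage.read_only"),
				Base:   &urlfetch.Transport{Context: ctx},
			},
		}

		resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=my-project")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer resp.Body.Close()
		w.WriteHeader(resp.StatusCode)
	}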
diff --git a/vendor/golang.org/x/oauth2/google/appengine_hook.go b/vendor/golang.org/x/oauth2/google/appengine_hook.go
new file mode 100644
index 00000000..56669eaa
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengine_hook.go
@@ -0,0 +1,14 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine appenginevm
+
+package google
+
+import "google.golang.org/appengine"
+
+func init() {
+	appengineTokenFunc = appengine.AccessToken
+	appengineAppIDFunc = appengine.AppID
+}
diff --git a/vendor/golang.org/x/oauth2/google/appengineflex_hook.go b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
new file mode 100644
index 00000000..5d0231af
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/appengineflex_hook.go
@@ -0,0 +1,11 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm
+
+package google
+
+func init() {
+	appengineFlex = true // Flex doesn't support appengine.AccessToken; depend on metadata server.
+}
diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go
new file mode 100644
index 00000000..004ed4ea
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/default.go
@@ -0,0 +1,130 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"cloud.google.com/go/compute/metadata"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+)
+
+// DefaultCredentials holds "Application Default Credentials".
+// For more details, see:
+// https://developers.google.com/accounts/docs/application-default-credentials
+type DefaultCredentials struct {
+	ProjectID   string // may be empty
+	TokenSource oauth2.TokenSource
+}
+
+// DefaultClient returns an HTTP Client that uses the
+// DefaultTokenSource to obtain authentication credentials.
+func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) {
+	ts, err := DefaultTokenSource(ctx, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return oauth2.NewClient(ctx, ts), nil
+}
+
+// DefaultTokenSource returns the token source for
+// "Application Default Credentials".
+// It is a shortcut for FindDefaultCredentials(ctx, scope).TokenSource.
+func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSource, error) {
+	creds, err := FindDefaultCredentials(ctx, scope...)
+	if err != nil {
+		return nil, err
+	}
+	return creds.TokenSource, nil
+}
+
+// FindDefaultCredentials searches for "Application Default Credentials".
+//
+// It looks for credentials in the following places,
+// preferring the first location found:
+//
+//   1. A JSON file whose path is specified by the
+//      GOOGLE_APPLICATION_CREDENTIALS environment variable.
+//   2. A JSON file in a location known to the gcloud command-line tool.
+//      On Windows, this is %APPDATA%/gcloud/application_default_credentials.json.
+//      On other systems, $HOME/.config/gcloud/application_default_credentials.json.
+//   3. On Google App Engine it uses the appengine.AccessToken function.
+//   4. On Google Compute Engine and Google App Engine Managed VMs, it fetches
+//      credentials from the metadata server.
+//      (In this final case any provided scopes are ignored.)
+func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) {
+	// First, try the environment variable.
+	const envVar = "GOOGLE_APPLICATION_CREDENTIALS"
+	if filename := os.Getenv(envVar); filename != "" {
+		creds, err := readCredentialsFile(ctx, filename, scope)
+		if err != nil {
+			return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err)
+		}
+		return creds, nil
+	}
+
+	// Second, try a well-known file.
+	filename := wellKnownFile()
+	if creds, err := readCredentialsFile(ctx, filename, scope); err == nil {
+		return creds, nil
+	} else if !os.IsNotExist(err) {
+		return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err)
+	}
+
+	// Third, if we're on Google App Engine use those credentials.
+	if appengineTokenFunc != nil && !appengineFlex {
+		return &DefaultCredentials{
+			ProjectID:   appengineAppIDFunc(ctx),
+			TokenSource: AppEngineTokenSource(ctx, scope...),
+		}, nil
+	}
+
+	// Fourth, if we're on Google Compute Engine use the metadata server.
+	if metadata.OnGCE() {
+		id, _ := metadata.ProjectID()
+		return &DefaultCredentials{
+			ProjectID:   id,
+			TokenSource: ComputeTokenSource(""),
+		}, nil
+	}
+
+	// None are found; return helpful error.
+	const url = "https://developers.google.com/accounts/docs/application-default-credentials"
+	return nil, fmt.Errorf("google: could not find default credentials. See %v for more information.", url)
+}
+
+func wellKnownFile() string {
+	const f = "application_default_credentials.json"
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud", f)
+	}
+	return filepath.Join(guessUnixHomeDir(), ".config", "gcloud", f)
+}
+
+func readCredentialsFile(ctx context.Context, filename string, scopes []string) (*DefaultCredentials, error) {
+	b, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	var f credentialsFile
+	if err := json.Unmarshal(b, &f); err != nil {
+		return nil, err
+	}
+	ts, err := f.tokenSource(ctx, append([]string(nil), scopes...))
+	if err != nil {
+		return nil, err
+	}
+	return &DefaultCredentials{
+		ProjectID:   f.ProjectID,
+		TokenSource: ts,
+	}, nil
+}
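
As a usage sketch, Application Default Credentials are normally consumed through FindDefaultCredentials or DefaultClient; the cloud-platform scope below is just an example:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/context"
		"golang.org/x/oauth2/google"
	)

	func main() {
		ctx := context.Background()
		scope := "https://www.googleapis.com/auth/cloud-platform" // example scope

		// Resolve credentials using the lookup order documented above.
		creds, err := google.FindDefaultCredentials(ctx, scope)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("project:", creds.ProjectID) // may be empty

		// Or get an authenticated *http.Client in one call.
		client, err := google.DefaultClient(ctx, scope)
		if err != nil {
			log.Fatal(err)
		}
		_ = client // use client.Get(...) against a Google API
	}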
diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go
new file mode 100644
index 00000000..66a8b0e1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,202 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making OAuth2 authorized and
+// authenticated HTTP requests to Google APIs.
+// It supports the Web server flow, client-side credentials, service accounts,
+// Google Compute Engine service accounts, and Google App Engine service
+// accounts.
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2
+// and
+// https://developers.google.com/accounts/docs/application-default-credentials.
+package google // import "golang.org/x/oauth2/google"
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"cloud.google.com/go/compute/metadata"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/jwt"
+)
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+	AuthURL:  "https://accounts.google.com/o/oauth2/auth",
+	TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// ConfigFromJSON uses a Google Developers Console client_credentials.json
+// file to construct a config.
+// client_credentials.json can be downloaded from
+// https://console.developers.google.com, under "Credentials". Download the Web
+// application credentials in the JSON format and provide the contents of the
+// file as jsonKey.
+func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) {
+	type cred struct {
+		ClientID     string   `json:"client_id"`
+		ClientSecret string   `json:"client_secret"`
+		RedirectURIs []string `json:"redirect_uris"`
+		AuthURI      string   `json:"auth_uri"`
+		TokenURI     string   `json:"token_uri"`
+	}
+	var j struct {
+		Web       *cred `json:"web"`
+		Installed *cred `json:"installed"`
+	}
+	if err := json.Unmarshal(jsonKey, &j); err != nil {
+		return nil, err
+	}
+	var c *cred
+	switch {
+	case j.Web != nil:
+		c = j.Web
+	case j.Installed != nil:
+		c = j.Installed
+	default:
+		return nil, fmt.Errorf("oauth2/google: no credentials found")
+	}
+	if len(c.RedirectURIs) < 1 {
+		return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
+	}
+	return &oauth2.Config{
+		ClientID:     c.ClientID,
+		ClientSecret: c.ClientSecret,
+		RedirectURL:  c.RedirectURIs[0],
+		Scopes:       scope,
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  c.AuthURI,
+			TokenURL: c.TokenURI,
+		},
+	}, nil
+}
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" for your project at
+// https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+	var f credentialsFile
+	if err := json.Unmarshal(jsonKey, &f); err != nil {
+		return nil, err
+	}
+	if f.Type != serviceAccountKey {
+		return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey)
+	}
+	scope = append([]string(nil), scope...) // copy
+	return f.jwtConfig(scope), nil
+}
+
+// JSON key file types.
+const (
+	serviceAccountKey  = "service_account"
+	userCredentialsKey = "authorized_user"
+)
+
+// credentialsFile is the unmarshalled representation of a credentials file.
+type credentialsFile struct {
+	Type string `json:"type"` // serviceAccountKey or userCredentialsKey
+
+	// Service Account fields
+	ClientEmail  string `json:"client_email"`
+	PrivateKeyID string `json:"private_key_id"`
+	PrivateKey   string `json:"private_key"`
+	TokenURL     string `json:"token_uri"`
+	ProjectID    string `json:"project_id"`
+
+	// User Credential fields
+	// (These typically come from gcloud auth.)
+	ClientSecret string `json:"client_secret"`
+	ClientID     string `json:"client_id"`
+	RefreshToken string `json:"refresh_token"`
+}
+
+func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config {
+	cfg := &jwt.Config{
+		Email:        f.ClientEmail,
+		PrivateKey:   []byte(f.PrivateKey),
+		PrivateKeyID: f.PrivateKeyID,
+		Scopes:       scopes,
+		TokenURL:     f.TokenURL,
+	}
+	if cfg.TokenURL == "" {
+		cfg.TokenURL = JWTTokenURL
+	}
+	return cfg
+}
+
+func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) {
+	switch f.Type {
+	case serviceAccountKey:
+		cfg := f.jwtConfig(scopes)
+		return cfg.TokenSource(ctx), nil
+	case userCredentialsKey:
+		cfg := &oauth2.Config{
+			ClientID:     f.ClientID,
+			ClientSecret: f.ClientSecret,
+			Scopes:       scopes,
+			Endpoint:     Endpoint,
+		}
+		tok := &oauth2.Token{RefreshToken: f.RefreshToken}
+		return cfg.TokenSource(ctx, tok), nil
+	case "":
+		return nil, errors.New("missing 'type' field in credentials")
+	default:
+		return nil, fmt.Errorf("unknown credential type: %q", f.Type)
+	}
+}
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+	account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+	if !metadata.OnGCE() {
+		return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+	}
+	acct := cs.account
+	if acct == "" {
+		acct = "default"
+	}
+	tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+	if err != nil {
+		return nil, err
+	}
+	var res struct {
+		AccessToken  string `json:"access_token"`
+		ExpiresInSec int    `json:"expires_in"`
+		TokenType    string `json:"token_type"`
+	}
+	err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+	}
+	if res.ExpiresInSec == 0 || res.AccessToken == "" {
+		return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+	}
+	return &oauth2.Token{
+		AccessToken: res.AccessToken,
+		TokenType:   res.TokenType,
+		Expiry:      time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+	}, nil
+}
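
A short sketch of the two common entry points in this file: building a service-account JWT config from a key file, and sourcing tokens from the GCE metadata server. The key path and scope are placeholders:

	package main

	import (
		"io/ioutil"
		"log"

		"golang.org/x/net/context"
		"golang.org/x/oauth2"
		"golang.org/x/oauth2/google"
	)

	func main() {
		ctx := context.Background()

		// Service-account (two-legged) flow from a downloaded JSON key.
		data, err := ioutil.ReadFile("key.json") // placeholder path
		if err != nil {
			log.Fatal(err)
		}
		conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/cloud-platform")
		if err != nil {
			log.Fatal(err)
		}
		saClient := conf.Client(ctx)
		_ = saClient

		// On a GCE instance, tokens come from the metadata server instead;
		// "" selects the default service account.
		gceClient := oauth2.NewClient(ctx, google.ComputeTokenSource(""))
		_ = gceClient
	}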
diff --git a/vendor/golang.org/x/oauth2/google/jwt.go b/vendor/golang.org/x/oauth2/google/jwt.go
new file mode 100644
index 00000000..b0fdb3a8
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/jwt.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"crypto/rsa"
+	"fmt"
+	"time"
+
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+	"golang.org/x/oauth2/jws"
+)
+
+// JWTAccessTokenSourceFromJSON uses a Google Developers service account JSON
+// key file to read the credentials that authorize and authenticate the
+// requests, and returns a TokenSource that does not use any OAuth2 flow but
+// instead creates a JWT and sends that as the access token.
+// The audience is typically a URL that specifies the scope of the credentials.
+//
+// Note that this is not a standard OAuth flow, but rather an
+// optimization supported by a few Google services.
+// Unless you know otherwise, you should use JWTConfigFromJSON instead.
+func JWTAccessTokenSourceFromJSON(jsonKey []byte, audience string) (oauth2.TokenSource, error) {
+	cfg, err := JWTConfigFromJSON(jsonKey)
+	if err != nil {
+		return nil, fmt.Errorf("google: could not parse JSON key: %v", err)
+	}
+	pk, err := internal.ParseKey(cfg.PrivateKey)
+	if err != nil {
+		return nil, fmt.Errorf("google: could not parse key: %v", err)
+	}
+	ts := &jwtAccessTokenSource{
+		email:    cfg.Email,
+		audience: audience,
+		pk:       pk,
+		pkID:     cfg.PrivateKeyID,
+	}
+	tok, err := ts.Token()
+	if err != nil {
+		return nil, err
+	}
+	return oauth2.ReuseTokenSource(tok, ts), nil
+}
+
+type jwtAccessTokenSource struct {
+	email, audience string
+	pk              *rsa.PrivateKey
+	pkID            string
+}
+
+func (ts *jwtAccessTokenSource) Token() (*oauth2.Token, error) {
+	iat := time.Now()
+	exp := iat.Add(time.Hour)
+	cs := &jws.ClaimSet{
+		Iss: ts.email,
+		Sub: ts.email,
+		Aud: ts.audience,
+		Iat: iat.Unix(),
+		Exp: exp.Unix(),
+	}
+	hdr := &jws.Header{
+		Algorithm: "RS256",
+		Typ:       "JWT",
+		KeyID:     string(ts.pkID),
+	}
+	msg, err := jws.Encode(hdr, cs, ts.pk)
+	if err != nil {
+		return nil, fmt.Errorf("google: could not encode JWT: %v", err)
+	}
+	return &oauth2.Token{AccessToken: msg, TokenType: "Bearer", Expiry: exp}, nil
+}
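
For illustration, a minimal sketch of the JWT access-token flow defined above; the key path and audience URL are placeholders:

	package main

	import (
		"fmt"
		"io/ioutil"
		"log"

		"golang.org/x/oauth2/google"
	)

	func main() {
		data, err := ioutil.ReadFile("key.json") // placeholder service-account key
		if err != nil {
			log.Fatal(err)
		}

		// The audience is typically the URL of the service being called.
		ts, err := google.JWTAccessTokenSourceFromJSON(data, "https://example.googleapis.com/")
		if err != nil {
			log.Fatal(err)
		}

		tok, err := ts.Token()
		if err != nil {
			log.Fatal(err)
		}
		// The token is a self-signed JWT; no OAuth round trip was made.
		fmt.Println("type:", tok.TokenType, "expires:", tok.Expiry)
	}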
diff --git a/vendor/golang.org/x/oauth2/google/sdk.go b/vendor/golang.org/x/oauth2/google/sdk.go
new file mode 100644
index 00000000..bdc18084
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/google/sdk.go
@@ -0,0 +1,172 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+)
+
+type sdkCredentials struct {
+	Data []struct {
+		Credential struct {
+			ClientID     string     `json:"client_id"`
+			ClientSecret string     `json:"client_secret"`
+			AccessToken  string     `json:"access_token"`
+			RefreshToken string     `json:"refresh_token"`
+			TokenExpiry  *time.Time `json:"token_expiry"`
+		} `json:"credential"`
+		Key struct {
+			Account string `json:"account"`
+			Scope   string `json:"scope"`
+		} `json:"key"`
+	}
+}
+
+// An SDKConfig provides access to tokens from an account already
+// authorized via the Google Cloud SDK.
+type SDKConfig struct {
+	conf         oauth2.Config
+	initialToken *oauth2.Token
+}
+
+// NewSDKConfig creates an SDKConfig for the given Google Cloud SDK
+// account. If account is empty, the account currently active in
+// Google Cloud SDK properties is used.
+// Google Cloud SDK credentials must be created by running `gcloud auth`
+// before using this function.
+// The Google Cloud SDK is available at https://cloud.google.com/sdk/.
+func NewSDKConfig(account string) (*SDKConfig, error) {
+	configPath, err := sdkConfigPath()
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: error getting SDK config path: %v", err)
+	}
+	credentialsPath := filepath.Join(configPath, "credentials")
+	f, err := os.Open(credentialsPath)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to load SDK credentials: %v", err)
+	}
+	defer f.Close()
+
+	var c sdkCredentials
+	if err := json.NewDecoder(f).Decode(&c); err != nil {
+		return nil, fmt.Errorf("oauth2/google: failed to decode SDK credentials from %q: %v", credentialsPath, err)
+	}
+	if len(c.Data) == 0 {
+		return nil, fmt.Errorf("oauth2/google: no credentials found in %q, run `gcloud auth login` to create one", credentialsPath)
+	}
+	if account == "" {
+		propertiesPath := filepath.Join(configPath, "properties")
+		f, err := os.Open(propertiesPath)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to load SDK properties: %v", err)
+		}
+		defer f.Close()
+		ini, err := internal.ParseINI(f)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2/google: failed to parse SDK properties %q: %v", propertiesPath, err)
+		}
+		core, ok := ini["core"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find [core] section in %v", ini)
+		}
+		active, ok := core["account"]
+		if !ok {
+			return nil, fmt.Errorf("oauth2/google: failed to find %q attribute in %v", "account", core)
+		}
+		account = active
+	}
+
+	for _, d := range c.Data {
+		if account == "" || d.Key.Account == account {
+			if d.Credential.AccessToken == "" && d.Credential.RefreshToken == "" {
+				return nil, fmt.Errorf("oauth2/google: no token available for account %q", account)
+			}
+			var expiry time.Time
+			if d.Credential.TokenExpiry != nil {
+				expiry = *d.Credential.TokenExpiry
+			}
+			return &SDKConfig{
+				conf: oauth2.Config{
+					ClientID:     d.Credential.ClientID,
+					ClientSecret: d.Credential.ClientSecret,
+					Scopes:       strings.Split(d.Key.Scope, " "),
+					Endpoint:     Endpoint,
+					RedirectURL:  "oob",
+				},
+				initialToken: &oauth2.Token{
+					AccessToken:  d.Credential.AccessToken,
+					RefreshToken: d.Credential.RefreshToken,
+					Expiry:       expiry,
+				},
+			}, nil
+		}
+	}
+	return nil, fmt.Errorf("oauth2/google: no such credentials for account %q", account)
+}
+
+// Client returns an HTTP client using Google Cloud SDK credentials to
+// authorize requests. The token will auto-refresh as necessary. The
+// underlying http.RoundTripper will be obtained using the provided
+// context. The returned client and its Transport should not be
+// modified.
+func (c *SDKConfig) Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &oauth2.Transport{
+			Source: c.TokenSource(ctx),
+		},
+	}
+}
+
+// TokenSource returns an oauth2.TokenSource that retrieves tokens from
+// Google Cloud SDK credentials using the provided context.
+// It will return the current access token stored in the credentials,
+// and refresh it when it expires, but it won't update the credentials
+// with the new access token.
+func (c *SDKConfig) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return c.conf.TokenSource(ctx, c.initialToken)
+}
+
+// Scopes are the OAuth 2.0 scopes the current account is authorized for.
+func (c *SDKConfig) Scopes() []string {
+	return c.conf.Scopes
+}
+
+// sdkConfigPath tries to guess where the gcloud config is located.
+// It can be overridden during tests.
+var sdkConfigPath = func() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gcloud"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gcloud"), nil
+}
+
+func guessUnixHomeDir() string {
+	// Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470
+	if v := os.Getenv("HOME"); v != "" {
+		return v
+	}
+	// Else, fall back to user.Current:
+	if u, err := user.Current(); err == nil {
+		return u.HomeDir
+	}
+	return ""
+}
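
A usage sketch for SDKConfig, assuming `gcloud auth login` has already been run on the machine:

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/net/context"
		"golang.org/x/oauth2/google"
	)

	func main() {
		// An empty account name selects the account currently active in gcloud.
		conf, err := google.NewSDKConfig("")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("authorized scopes:", conf.Scopes())

		// The client refreshes the stored access token as needed, without
		// writing the refreshed token back to the gcloud credentials file.
		client := conf.Client(context.Background())
		_ = client // use client.Get(...) against a Google API
	}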
diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 00000000..e31541b3
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,76 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"bufio"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+	block, _ := pem.Decode(key)
+	if block != nil {
+		key = block.Bytes
+	}
+	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+	if err != nil {
+		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+		if err != nil {
+			return nil, fmt.Errorf("private key should be a PEM or plain PKSC1 or PKCS8; parse error: %v", err)
+		}
+	}
+	parsed, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		return nil, errors.New("private key is invalid")
+	}
+	return parsed, nil
+}
+
+func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
+	result := map[string]map[string]string{
+		"": {}, // root section
+	}
+	scanner := bufio.NewScanner(ini)
+	currentSection := ""
+	for scanner.Scan() {
+		line := strings.TrimSpace(scanner.Text())
+		if strings.HasPrefix(line, ";") {
+			// comment.
+			continue
+		}
+		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
+			currentSection = strings.TrimSpace(line[1 : len(line)-1])
+			result[currentSection] = map[string]string{}
+			continue
+		}
+		parts := strings.SplitN(line, "=", 2)
+		if len(parts) == 2 && parts[0] != "" {
+			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return nil, fmt.Errorf("error scanning ini: %v", err)
+	}
+	return result, nil
+}
+
+func CondVal(v string) []string {
+	if v == "" {
+		return nil
+	}
+	return []string{v}
+}
diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go
new file mode 100644
index 00000000..0487c81b
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/token.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// This type is a mirror of oauth2.Token and exists to break
+// an otherwise-circular dependency. Other internal packages
+// should convert this Token into an oauth2.Token before use.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time
+
+	// Raw optionally contains extra metadata from the server
+	// when updating a token.
+	Raw interface{}
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+	AccessToken  string         `json:"access_token"`
+	TokenType    string         `json:"token_type"`
+	RefreshToken string         `json:"refresh_token"`
+	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
+	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+	if v := e.ExpiresIn; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	if v := e.Expires; v != 0 {
+		return time.Now().Add(time.Duration(v) * time.Second)
+	}
+	return
+}
+
+type expirationTime int32
+
+func (e *expirationTime) UnmarshalJSON(b []byte) error {
+	var n json.Number
+	err := json.Unmarshal(b, &n)
+	if err != nil {
+		return err
+	}
+	i, err := n.Int64()
+	if err != nil {
+		return err
+	}
+	*e = expirationTime(i)
+	return nil
+}
+
+var brokenAuthHeaderProviders = []string{
+	"https://accounts.google.com/",
+	"https://api.codeswholesale.com/oauth/token",
+	"https://api.dropbox.com/",
+	"https://api.dropboxapi.com/",
+	"https://api.instagram.com/",
+	"https://api.netatmo.net/",
+	"https://api.odnoklassniki.ru/",
+	"https://api.pushbullet.com/",
+	"https://api.soundcloud.com/",
+	"https://api.twitch.tv/",
+	"https://app.box.com/",
+	"https://connect.stripe.com/",
+	"https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214
+	"https://login.microsoftonline.com/",
+	"https://login.salesforce.com/",
+	"https://login.windows.net",
+	"https://oauth.sandbox.trainingpeaks.com/",
+	"https://oauth.trainingpeaks.com/",
+	"https://oauth.vk.com/",
+	"https://openapi.baidu.com/",
+	"https://slack.com/",
+	"https://test-sandbox.auth.corp.google.com",
+	"https://test.salesforce.com/",
+	"https://user.gini.net/",
+	"https://www.douban.com/",
+	"https://www.googleapis.com/",
+	"https://www.linkedin.com/",
+	"https://www.strava.com/oauth/",
+	"https://www.wunderlist.com/oauth/",
+	"https://api.patreon.com/",
+	"https://sandbox.codeswholesale.com/oauth/token",
+	"https://api.sipgate.com/v1/authorization/oauth",
+}
+
+// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
+var brokenAuthHeaderDomains = []string{
+	".force.com",
+	".myshopify.com",
+	".okta.com",
+	".oktapreview.com",
+}
+
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts it in either the URL param or the Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
+func providerAuthHeaderWorks(tokenURL string) bool {
+	for _, s := range brokenAuthHeaderProviders {
+		if strings.HasPrefix(tokenURL, s) {
+			// Some sites fail to implement the OAuth2 spec fully.
+			return false
+		}
+	}
+
+	if u, err := url.Parse(tokenURL); err == nil {
+		for _, s := range brokenAuthHeaderDomains {
+			if strings.HasSuffix(u.Host, s) {
+				return false
+			}
+		}
+	}
+
+	// Assume the provider implements the spec properly
+	// otherwise. We can add more exceptions as they're
+	// discovered. We will _not_ be adding configurable hooks
+	// to this package to let users select server bugs.
+	return true
+}
+
+func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
+	hc, err := ContextClient(ctx)
+	if err != nil {
+		return nil, err
+	}
+	bustedAuth := !providerAuthHeaderWorks(tokenURL)
+	if bustedAuth {
+		if clientID != "" {
+			v.Set("client_id", clientID)
+		}
+		if clientSecret != "" {
+			v.Set("client_secret", clientSecret)
+		}
+	}
+	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	if !bustedAuth {
+		req.SetBasicAuth(clientID, clientSecret)
+	}
+	r, err := ctxhttp.Do(ctx, hc, req)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if code := r.StatusCode; code < 200 || code > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+	}
+
+	var token *Token
+	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+	switch content {
+	case "application/x-www-form-urlencoded", "text/plain":
+		vals, err := url.ParseQuery(string(body))
+		if err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  vals.Get("access_token"),
+			TokenType:    vals.Get("token_type"),
+			RefreshToken: vals.Get("refresh_token"),
+			Raw:          vals,
+		}
+		e := vals.Get("expires_in")
+		if e == "" {
+			// TODO(jbd): Facebook's OAuth2 implementation is broken and
+			// returns expires_in field in expires. Remove the fallback to expires,
+			// when Facebook fixes their implementation.
+			e = vals.Get("expires")
+		}
+		expires, _ := strconv.Atoi(e)
+		if expires != 0 {
+			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+		}
+	default:
+		var tj tokenJSON
+		if err = json.Unmarshal(body, &tj); err != nil {
+			return nil, err
+		}
+		token = &Token{
+			AccessToken:  tj.AccessToken,
+			TokenType:    tj.TokenType,
+			RefreshToken: tj.RefreshToken,
+			Expiry:       tj.expiry(),
+			Raw:          make(map[string]interface{}),
+		}
+		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
+	}
+	// Don't overwrite `RefreshToken` with an empty value
+	// if this was a token refreshing request.
+	if token.RefreshToken == "" {
+		token.RefreshToken = v.Get("refresh_token")
+	}
+	return token, nil
+}
diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go
new file mode 100644
index 00000000..f1f173e3
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/internal/transport.go
@@ -0,0 +1,69 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+	"net/http"
+
+	"golang.org/x/net/context"
+)
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient ContextKey
+
+// ContextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a ContextKey, being unexported.
+type ContextKey struct{}
+
+// ContextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error.  If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type ContextClientFunc func(context.Context) (*http.Client, error)
+
+var contextClientFuncs []ContextClientFunc
+
+func RegisterContextClientFunc(fn ContextClientFunc) {
+	contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func ContextClient(ctx context.Context) (*http.Client, error) {
+	if ctx != nil {
+		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
+			return hc, nil
+		}
+	}
+	for _, fn := range contextClientFuncs {
+		c, err := fn(ctx)
+		if err != nil {
+			return nil, err
+		}
+		if c != nil {
+			return c, nil
+		}
+	}
+	return http.DefaultClient, nil
+}
+
+func ContextTransport(ctx context.Context) http.RoundTripper {
+	hc, err := ContextClient(ctx)
+	// This is a rare error case (somebody using nil on App Engine).
+	if err != nil {
+		return ErrorTransport{err}
+	}
+	return hc.Transport
+}
+
+// ErrorTransport returns the specified error on RoundTrip.
+// This RoundTripper should be used in rare error cases where
+// error handling can be postponed to response handling time.
+type ErrorTransport struct{ Err error }
+
+func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+	return nil, t.Err
+}
diff --git a/vendor/golang.org/x/oauth2/jws/jws.go b/vendor/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 00000000..683d2d27
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,182 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides a partial implementation
+// of JSON Web Signature encoding and decoding.
+// It exists to support the golang.org/x/oauth2 package.
+//
+// See RFC 7515.
+//
+// Deprecated: this package is not intended for public use and might be
+// removed in the future. It exists for internal use only.
+// Please switch to another JWS package or copy this package into your own
+// source tree.
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+	Iss   string `json:"iss"`             // email address of the client_id of the application making the access token request
+	Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+	Aud   string `json:"aud"`             // descriptor of the intended target of the assertion (Optional).
+	Exp   int64  `json:"exp"`             // the expiration time of the assertion (seconds since Unix epoch)
+	Iat   int64  `json:"iat"`             // the time the assertion was issued (seconds since Unix epoch)
+	Typ   string `json:"typ,omitempty"`   // token type (Optional).
+
+	// Email for which the application is requesting delegated access (Optional).
+	Sub string `json:"sub,omitempty"`
+
+	// The old name of Sub. Client keeps setting Prn to be
+	// compliant with legacy OAuth 2.0 providers. (Optional)
+	Prn string `json:"prn,omitempty"`
+
+	// See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+	// This array is marshalled using custom code (see (c *ClaimSet) encode()).
+	PrivateClaims map[string]interface{} `json:"-"`
+}
+
+func (c *ClaimSet) encode() (string, error) {
+	// Backdate the issued-at time for machines whose clocks are not
+	// perfectly in sync. If the client machine's time is in the future
+	// according to Google servers, an access token will not be issued.
+	now := time.Now().Add(-10 * time.Second)
+	if c.Iat == 0 {
+		c.Iat = now.Unix()
+	}
+	if c.Exp == 0 {
+		c.Exp = now.Add(time.Hour).Unix()
+	}
+	if c.Exp < c.Iat {
+		return "", fmt.Errorf("jws: invalid Exp = %v; must be later than Iat = %v", c.Exp, c.Iat)
+	}
+
+	b, err := json.Marshal(c)
+	if err != nil {
+		return "", err
+	}
+
+	if len(c.PrivateClaims) == 0 {
+		return base64.RawURLEncoding.EncodeToString(b), nil
+	}
+
+	// Marshal private claim set and then append it to b.
+	prv, err := json.Marshal(c.PrivateClaims)
+	if err != nil {
+		return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+	}
+
+	// Concatenate public and private claim JSON objects.
+	if !bytes.HasSuffix(b, []byte{'}'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", b)
+	}
+	if !bytes.HasPrefix(prv, []byte{'{'}) {
+		return "", fmt.Errorf("jws: invalid JSON %s", prv)
+	}
+	b[len(b)-1] = ','         // Replace closing curly brace with a comma.
+	b = append(b, prv[1:]...) // Append private claims.
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+	// The algorithm used for signature.
+	Algorithm string `json:"alg"`
+
+	// Represents the token type.
+	Typ string `json:"typ"`
+
+	// The optional hint of which key is being used.
+	KeyID string `json:"kid,omitempty"`
+}
+
+func (h *Header) encode() (string, error) {
+	b, err := json.Marshal(h)
+	if err != nil {
+		return "", err
+	}
+	return base64.RawURLEncoding.EncodeToString(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+	// A JWS payload has three dot-separated segments; the claim set is the second.
+	s := strings.Split(payload, ".")
+	if len(s) < 2 {
+		// TODO(jbd): Provide more context about the error.
+		return nil, errors.New("jws: invalid token received")
+	}
+	decoded, err := base64.RawURLEncoding.DecodeString(s[1])
+	if err != nil {
+		return nil, err
+	}
+	c := &ClaimSet{}
+	err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+	return c, err
+}
+
+// Signer returns a signature for the given data.
+type Signer func(data []byte) (sig []byte, err error)
+
+// EncodeWithSigner encodes a header and claim set with the provided signer.
+func EncodeWithSigner(header *Header, c *ClaimSet, sg Signer) (string, error) {
+	head, err := header.encode()
+	if err != nil {
+		return "", err
+	}
+	cs, err := c.encode()
+	if err != nil {
+		return "", err
+	}
+	ss := fmt.Sprintf("%s.%s", head, cs)
+	sig, err := sg([]byte(ss))
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s.%s", ss, base64.RawURLEncoding.EncodeToString(sig)), nil
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+// This invokes EncodeWithSigner using crypto/rsa.SignPKCS1v15 with the given RSA private key.
+func Encode(header *Header, c *ClaimSet, key *rsa.PrivateKey) (string, error) {
+	sg := func(data []byte) (sig []byte, err error) {
+		h := sha256.New()
+		h.Write(data)
+		return rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
+	}
+	return EncodeWithSigner(header, c, sg)
+}
+
+// Verify tests whether the provided JWT token's signature was produced by the private key
+// associated with the supplied public key.
+func Verify(token string, key *rsa.PublicKey) error {
+	parts := strings.Split(token, ".")
+	if len(parts) != 3 {
+		return errors.New("jws: invalid token received, token must have 3 parts")
+	}
+
+	signedContent := parts[0] + "." + parts[1]
+	signatureString, err := base64.RawURLEncoding.DecodeString(parts[2])
+	if err != nil {
+		return err
+	}
+
+	h := sha256.New()
+	h.Write([]byte(signedContent))
+	return rsa.VerifyPKCS1v15(key, crypto.SHA256, h.Sum(nil), []byte(signatureString))
+}
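To make the sign/verify round trip above concrete, here is a short, hedged sketch using Encode and Verify; the key is generated on the fly and the claim values are placeholders, not anything this package requires:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"
	"time"

	"golang.org/x/oauth2/jws"
)

func main() {
	// Generate a throwaway RSA key; real callers load a service-account key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	header := &jws.Header{Algorithm: "RS256", Typ: "JWT"}
	claims := &jws.ClaimSet{
		Iss: "client@example.com",
		Aud: "https://oauth2.example.com/token",
		Exp: time.Now().Add(time.Hour).Unix(),
	}

	// Encode signs "header.claims" with SHA-256 / RSASSA-PKCS1-v1_5.
	token, err := jws.Encode(header, claims, key)
	if err != nil {
		log.Fatal(err)
	}

	// Verify checks the signature against the matching public key.
	if err := jws.Verify(token, &key.PublicKey); err != nil {
		log.Fatal(err)
	}
	fmt.Println("token verified")
}
```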
diff --git a/vendor/golang.org/x/oauth2/jwt/jwt.go b/vendor/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 00000000..e016db42
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,159 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/internal"
+	"golang.org/x/oauth2/jws"
+)
+
+var (
+	defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+	defaultHeader    = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+	// Email is the OAuth client identifier used when communicating with
+	// the configured OAuth provider.
+	Email string
+
+	// PrivateKey contains the contents of an RSA private key or the
+	// contents of a PEM file that contains a private key. The provided
+	// private key is used to sign JWT payloads.
+	// PEM containers with a passphrase are not supported.
+	// Use the following command to convert a PKCS 12 file into a PEM.
+	//
+	//    $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+	//
+	PrivateKey []byte
+
+	// PrivateKeyID contains an optional hint indicating which key is being
+	// used.
+	PrivateKeyID string
+
+	// Subject is the optional user to impersonate.
+	Subject string
+
+	// Scopes optionally specifies a list of requested permission scopes.
+	Scopes []string
+
+	// TokenURL is the endpoint required to complete the 2-legged JWT flow.
+	TokenURL string
+
+	// Expires optionally specifies how long the token is valid for.
+	Expires time.Duration
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx context.Context) oauth2.TokenSource {
+	return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context) *http.Client {
+	return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+	ctx  context.Context
+	conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+	pk, err := internal.ParseKey(js.conf.PrivateKey)
+	if err != nil {
+		return nil, err
+	}
+	hc := oauth2.NewClient(js.ctx, nil)
+	claimSet := &jws.ClaimSet{
+		Iss:   js.conf.Email,
+		Scope: strings.Join(js.conf.Scopes, " "),
+		Aud:   js.conf.TokenURL,
+	}
+	if subject := js.conf.Subject; subject != "" {
+		claimSet.Sub = subject
+		// prn is the old name of sub. Keep setting it
+		// to be compatible with legacy OAuth 2.0 providers.
+		claimSet.Prn = subject
+	}
+	if t := js.conf.Expires; t > 0 {
+		claimSet.Exp = time.Now().Add(t).Unix()
+	}
+	h := *defaultHeader
+	h.KeyID = js.conf.PrivateKeyID
+	payload, err := jws.Encode(&h, claimSet, pk)
+	if err != nil {
+		return nil, err
+	}
+	v := url.Values{}
+	v.Set("grant_type", defaultGrantType)
+	v.Set("assertion", payload)
+	resp, err := hc.PostForm(js.conf.TokenURL, v)
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	defer resp.Body.Close()
+	body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+	if err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	if c := resp.StatusCode; c < 200 || c > 299 {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+	}
+	// tokenRes is the JSON response body.
+	var tokenRes struct {
+		AccessToken string `json:"access_token"`
+		TokenType   string `json:"token_type"`
+		IDToken     string `json:"id_token"`
+		ExpiresIn   int64  `json:"expires_in"` // relative seconds from now
+	}
+	if err := json.Unmarshal(body, &tokenRes); err != nil {
+		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+	}
+	token := &oauth2.Token{
+		AccessToken: tokenRes.AccessToken,
+		TokenType:   tokenRes.TokenType,
+	}
+	raw := make(map[string]interface{})
+	json.Unmarshal(body, &raw) // no error checks for optional fields
+	token = token.WithExtra(raw)
+
+	if secs := tokenRes.ExpiresIn; secs > 0 {
+		token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+	}
+	if v := tokenRes.IDToken; v != "" {
+		// decode returned id token to get expiry
+		claimSet, err := jws.Decode(v)
+		if err != nil {
+			return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+		}
+		token.Expiry = time.Unix(claimSet.Exp, 0)
+	}
+	return token, nil
+}
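A hedged usage sketch of the two-legged flow this file implements; the key file path, service-account email, scope, and token URL below are placeholders that a real caller would replace with provider-specific values:

```go
package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/jwt"
)

func main() {
	// PEM-encoded private key for the service account (placeholder path).
	pem, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}

	conf := &jwt.Config{
		Email:      "service-account@example.com",
		PrivateKey: pem,
		Scopes:     []string{"https://www.googleapis.com/auth/devstorage.read_only"},
		TokenURL:   "https://accounts.google.com/o/oauth2/token",
	}

	// Client wraps the context's transport and attaches tokens obtained
	// via the signed-JWT grant to every outgoing request.
	client := conf.Client(context.Background())
	resp, err := client.Get("https://www.googleapis.com/storage/v1/b?project=example")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```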
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 00000000..3e4835d7
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,340 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2 // import "golang.org/x/oauth2"
+
+import (
+	"bytes"
+	"errors"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// NoContext is the default context you should supply if not using
+// your own context.Context (see https://golang.org/x/net/context).
+//
+// Deprecated: Use context.Background() or context.TODO() instead.
+var NoContext = context.TODO()
+
+// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
+// identified by the tokenURL prefix as an OAuth2 implementation
+// which doesn't support the HTTP Basic authentication
+// scheme to authenticate with the authorization server.
+// Once a server is registered, credentials (client_id and client_secret)
+// will be passed as query parameters rather than being present
+// in the Authorization header.
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+func RegisterBrokenAuthHeaderProvider(tokenURL string) {
+	internal.RegisterBrokenAuthHeaderProvider(tokenURL)
+}
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
+// package (https://golang.org/x/oauth2/clientcredentials).
+type Config struct {
+	// ClientID is the application's ID.
+	ClientID string
+
+	// ClientSecret is the application's secret.
+	ClientSecret string
+
+	// Endpoint contains the resource server's token endpoint
+	// URLs. These are constants specific to each server and are
+	// often available via site-specific packages, such as
+	// google.Endpoint or github.Endpoint.
+	Endpoint Endpoint
+
+	// RedirectURL is the URL to redirect users to after they have
+	// completed the OAuth flow on the provider's authorization pages.
+	RedirectURL string
+
+	// Scopes specifies optional requested permissions.
+	Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+	// Token returns a token or an error.
+	// Token must be safe for concurrent use by multiple goroutines.
+	// The returned Token must not be modified.
+	Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+	AuthURL  string
+	TokenURL string
+}
+
+var (
+	// AccessTypeOnline and AccessTypeOffline are options passed
+	// to the Config.AuthCodeURL method. They modify the
+	// "access_type" field that gets sent in the URL returned by
+	// AuthCodeURL.
+	//
+	// Online is the default if neither is specified. If your
+	// application needs to refresh access tokens when the user
+	// is not present at the browser, then use offline. This will
+	// result in your application obtaining a refresh token the
+	// first time your application exchanges an authorization
+	// code for a user.
+	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
+	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")
+
+	// ApprovalForce forces the users to view the consent dialog
+	// and confirm the permissions request at the URL returned
+	// from AuthCodeURL, even if they've already done so.
+	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
+)
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+	setValue(url.Values)
+}
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
+// to a provider's authorization endpoint.
+func SetAuthURLParam(key, value string) AuthCodeOption {
+	return setParam{key, value}
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  internal.CondVal(c.RedirectURL),
+		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
+		"state":         internal.CondVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// PasswordCredentialsToken converts a resource owner username and password
+// pair into a token.
+//
+// Per the RFC, this grant type should only be used "when there is a high
+// degree of trust between the resource owner and the client (e.g., the client
+// is part of the device operating system or a highly privileged application),
+// and when other authorization grant types are not available."
+// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
+//
+// The HTTP client to use is derived from the context.
+// If nil, http.DefaultClient is used.
+func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type": {"password"},
+		"username":   {username},
+		"password":   {password},
+		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
+	})
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context.
+// If a client is not provided via the context, http.DefaultClient is used.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
+	return retrieveToken(ctx, c, url.Values{
+		"grant_type":   {"authorization_code"},
+		"code":         {code},
+		"redirect_uri": internal.CondVal(c.RedirectURL),
+	})
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
+	return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
+	tkr := &tokenRefresher{
+		ctx:  ctx,
+		conf: c,
+	}
+	if t != nil {
+		tkr.refreshToken = t.RefreshToken
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: tkr,
+	}
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+	ctx          context.Context // used to get HTTP requests
+	conf         *Config
+	refreshToken string
+}
+
+// WARNING: Token is not safe for concurrent access, as it
+// updates the tokenRefresher's refreshToken field.
+// Within this package, it is used by reuseTokenSource which
+// synchronizes calls to this method with its own mutex.
+func (tf *tokenRefresher) Token() (*Token, error) {
+	if tf.refreshToken == "" {
+		return nil, errors.New("oauth2: token expired and refresh token is not set")
+	}
+
+	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
+		"grant_type":    {"refresh_token"},
+		"refresh_token": {tf.refreshToken},
+	})
+
+	if err != nil {
+		return nil, err
+	}
+	if tf.refreshToken != tk.RefreshToken {
+		tf.refreshToken = tk.RefreshToken
+	}
+	return tk, err
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (the wrapped TokenSource supplies the HTTP
+// client information) and return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if s.t.Valid() {
+		return s.t, nil
+	}
+	t, err := s.new.Token()
+	if err != nil {
+		return nil, err
+	}
+	s.t = t
+	return t, nil
+}
+
+// StaticTokenSource returns a TokenSource that always returns the same token.
+// Because the provided token t is never refreshed, StaticTokenSource is only
+// useful for tokens that never expire.
+func StaticTokenSource(t *Token) TokenSource {
+	return staticTokenSource{t}
+}
+
+// staticTokenSource is a TokenSource that always returns the same Token.
+type staticTokenSource struct {
+	t *Token
+}
+
+func (s staticTokenSource) Token() (*Token, error) {
+	return s.t, nil
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient internal.ContextKey
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx context.Context, src TokenSource) *http.Client {
+	if src == nil {
+		c, err := internal.ContextClient(ctx)
+		if err != nil {
+			return &http.Client{Transport: internal.ErrorTransport{Err: err}}
+		}
+		return c
+	}
+	return &http.Client{
+		Transport: &Transport{
+			Base:   internal.ContextTransport(ctx),
+			Source: ReuseTokenSource(nil, src),
+		},
+	}
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+	// Don't wrap a reuseTokenSource in itself. That would work,
+	// but cause an unnecessary number of mutex operations.
+	// Just build the equivalent one.
+	if rt, ok := src.(*reuseTokenSource); ok {
+		if t == nil {
+			// Just use it directly.
+			return rt
+		}
+		src = rt.new
+	}
+	return &reuseTokenSource{
+		t:   t,
+		new: src,
+	}
+}
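A minimal, hedged sketch of the three-legged flow built from the pieces above; the endpoint URLs, client credentials, scope, and redirect URL are placeholders, and the code is read from stdin only to keep the example self-contained:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:     "CLIENT_ID",
		ClientSecret: "CLIENT_SECRET",
		Scopes:       []string{"repo"},
		RedirectURL:  "https://example.com/oauth2/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://provider.example.com/oauth2/auth",
			TokenURL: "https://provider.example.com/oauth2/token",
		},
	}

	// Step 1: send the user to the consent page; the state value must be
	// validated again on the redirect callback.
	url := conf.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
	fmt.Println("Visit:", url)

	// Step 2: after the redirect, exchange the returned code for a token.
	var code string
	if _, err := fmt.Scan(&code); err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()
	tok, err := conf.Exchange(ctx, code)
	if err != nil {
		log.Fatal(err)
	}

	// Step 3: the returned client refreshes tok automatically as needed.
	client := conf.Client(ctx, tok)
	_ = client
}
```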
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
new file mode 100644
index 00000000..7a3167f1
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -0,0 +1,158 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2/internal"
+)
+
+// expiryDelta determines how much earlier a token should be considered
+// expired than its actual expiration time. It is used to avoid late
+// expirations due to client-server time mismatches.
+const expiryDelta = 10 * time.Second
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+	if strings.EqualFold(t.TokenType, "bearer") {
+		return "Bearer"
+	}
+	if strings.EqualFold(t.TokenType, "mac") {
+		return "MAC"
+	}
+	if strings.EqualFold(t.TokenType, "basic") {
+		return "Basic"
+	}
+	if t.TokenType != "" {
+		return t.TokenType
+	}
+	return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+	t2 := new(Token)
+	*t2 = *t
+	t2.raw = extra
+	return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+	if raw, ok := t.raw.(map[string]interface{}); ok {
+		return raw[key]
+	}
+
+	vals, ok := t.raw.(url.Values)
+	if !ok {
+		return nil
+	}
+
+	v := vals.Get(key)
+	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
+	case 0: // Contains no "."; try to parse as int
+		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+			return i
+		}
+	case 1: // Contains a single "."; try to parse as float
+		if f, err := strconv.ParseFloat(s, 64); err == nil {
+			return f
+		}
+	}
+
+	return v
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+	if t.Expiry.IsZero() {
+		return false
+	}
+	return t.Expiry.Add(-expiryDelta).Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+	return t != nil && t.AccessToken != "" && !t.expired()
+}
+
+// tokenFromInternal maps an *internal.Token struct into
+// a *Token struct.
+func tokenFromInternal(t *internal.Token) *Token {
+	if t == nil {
+		return nil
+	}
+	return &Token{
+		AccessToken:  t.AccessToken,
+		TokenType:    t.TokenType,
+		RefreshToken: t.RefreshToken,
+		Expiry:       t.Expiry,
+		raw:          t.Raw,
+	}
+}
+
+// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
+// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
+// with an error.
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
+	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
+	if err != nil {
+		return nil, err
+	}
+	return tokenFromInternal(tk), nil
+}
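A small, hedged sketch of the Token helpers defined above; the access token, expiry, and extra field values are placeholders:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	// Tokens returned by a provider may carry extra JSON fields; Extra
	// exposes them, and Valid reports whether the token is still usable.
	tok := (&oauth2.Token{
		AccessToken: "ya29.example",
		TokenType:   "Bearer",
		Expiry:      time.Now().Add(30 * time.Minute),
	}).WithExtra(map[string]interface{}{"id_token": "eyJ..."})

	fmt.Println(tok.Valid())           // true: has an access token and is not expired
	fmt.Println(tok.Extra("id_token")) // "eyJ..."
}
```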
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
new file mode 100644
index 00000000..92ac7e25
--- /dev/null
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -0,0 +1,132 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+	"errors"
+	"io"
+	"net/http"
+	"sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Sources.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+	// Source supplies the token to add to outgoing requests'
+	// Authorization headers.
+	Source TokenSource
+
+	// Base is the base RoundTripper used to make HTTP requests.
+	// If nil, http.DefaultTransport is used.
+	Base http.RoundTripper
+
+	mu     sync.Mutex                      // guards modReq
+	modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if t.Source == nil {
+		return nil, errors.New("oauth2: Transport's Source is nil")
+	}
+	token, err := t.Source.Token()
+	if err != nil {
+		return nil, err
+	}
+
+	req2 := cloneRequest(req) // per RoundTripper contract
+	token.SetAuthHeader(req2)
+	t.setModReq(req, req2)
+	res, err := t.base().RoundTrip(req2)
+	if err != nil {
+		t.setModReq(req, nil)
+		return nil, err
+	}
+	res.Body = &onEOFReader{
+		rc: res.Body,
+		fn: func() { t.setModReq(req, nil) },
+	}
+	return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+	type canceler interface {
+		CancelRequest(*http.Request)
+	}
+	if cr, ok := t.base().(canceler); ok {
+		t.mu.Lock()
+		modReq := t.modReq[req]
+		delete(t.modReq, req)
+		t.mu.Unlock()
+		cr.CancelRequest(modReq)
+	}
+}
+
+func (t *Transport) base() http.RoundTripper {
+	if t.Base != nil {
+		return t.Base
+	}
+	return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if t.modReq == nil {
+		t.modReq = make(map[*http.Request]*http.Request)
+	}
+	if mod == nil {
+		delete(t.modReq, orig)
+	} else {
+		t.modReq[orig] = mod
+	}
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+	// shallow copy of the struct
+	r2 := new(http.Request)
+	*r2 = *r
+	// deep copy of the Header
+	r2.Header = make(http.Header, len(r.Header))
+	for k, s := range r.Header {
+		r2.Header[k] = append([]string(nil), s...)
+	}
+	return r2
+}
+
+type onEOFReader struct {
+	rc io.ReadCloser
+	fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+func (r *onEOFReader) Close() error {
+	err := r.rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *onEOFReader) runFunc() {
+	if fn := r.fn; fn != nil {
+		fn()
+		r.fn = nil
+	}
+}
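A hedged sketch of wiring Transport up by hand, which most callers get for free via Config.Client or oauth2.NewClient; the token value and request URL are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// Any TokenSource works; a static token is the simplest to demonstrate.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "TOKEN"})

	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: ts,                    // adds "Authorization: Bearer TOKEN"
			Base:   http.DefaultTransport, // nil would also default to this
		},
	}

	resp, err := client.Get("https://api.example.com/resource")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
}
```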
diff --git a/vendor/google.golang.org/appengine/CONTRIBUTING.md b/vendor/google.golang.org/appengine/CONTRIBUTING.md
new file mode 100644
index 00000000..ffc29852
--- /dev/null
+++ b/vendor/google.golang.org/appengine/CONTRIBUTING.md
@@ -0,0 +1,90 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. Get the package:
+
+    `go get -d google.golang.org/appengine`
+1. Change into the checked out source:
+
+    `cd $GOPATH/src/google.golang.org/appengine`
+1. Fork the repo.
+1. Set your fork as a remote:
+
+    `git remote add fork git@github.com:GITHUB_USERNAME/appengine.git`
+1. Make changes, commit to your fork.
+1. Send a pull request with your changes. 
+   The first line of your commit message is conventionally a one-line summary of the change, prefixed by the primary affected package, and is used as the title of your pull request.
+
+# Testing
+
+## Running system tests
+
+Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`.
+
+Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`.
+
+Run tests with `goapp test`:
+
+```
+goapp test -v google.golang.org/appengine/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 00000000..d86768a2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,73 @@
+# Go App Engine packages
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime on *App Engine standard*.
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [GitHub issue
+tracker](https://github.com/golang/appengine/issues).
+
+## Upgrading an App Engine app to the flexible environment
+
+This package does not work on *App Engine flexible*.
+
+There are many differences between the App Engine standard environment and
+the flexible environment.
+
+See the [documentation on upgrading to the flexible environment](https://cloud.google.com/appengine/docs/flexible/go/upgrading).
+
+## Directory structure
+
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating from legacy (`import "appengine"`) packages
+
+If you're currently using the bare `appengine` packages
+(that is, not these ones, imported via `google.golang.org/appengine`),
+then you can use the `aefix` tool to help automate an upgrade to these packages.
+
+Run `go get google.golang.org/appengine/cmd/aefix` to install it.
+
+### 1. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
+
+### 2. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and there are some differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+  `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+  deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+  Use [Google Cloud Storage](https://godoc.org/cloud.google.com/go/storage) if the
+  feature you require is not present in the new
+  [blobstore package](https://google.golang.org/appengine/blobstore).
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+  Use the standard `net` package instead.
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 00000000..d4f80844
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,113 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine // import "google.golang.org/appengine"
+
+import (
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is the principal entry point for an app running in App Engine.
+//
+// On App Engine Flexible it installs a trivial health checker if one isn't
+// already registered, and starts listening on port 8080 (overridden by the
+// $PORT environment variable).
+//
+// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
+// for details on how to do your own health checking.
+//
+// On App Engine Standard it ensures the server has started and is prepared to
+// receive requests.
+//
+// Main never returns.
+//
+// Main is designed so that the app's main package looks like this:
+//
+//      package main
+//
+//      import (
+//              "google.golang.org/appengine"
+//
+//              _ "myapp/package0"
+//              _ "myapp/package1"
+//      )
+//
+//      func main() {
+//              appengine.Main()
+//      }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+	internal.Main()
+}
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+	return internal.IsDevAppServer()
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// This function is cheap.
+func NewContext(req *http.Request) context.Context {
+	return WithContext(context.Background(), req)
+}
+
+// WithContext returns a copy of the parent context
+// and associates it with an in-flight HTTP request.
+// This function is cheap.
+func WithContext(parent context.Context, req *http.Request) context.Context {
+	return internal.WithContext(parent, req)
+}
+
+// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+	Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+	return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+// APICallFunc defines a function type for handling an API call.
+// See WithCallOverride.
+type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
+
+// WithAPICallFunc returns a copy of the parent context
+// that will cause API calls to invoke f instead of their normal operation.
+//
+// This is intended for advanced users only.
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
+	return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
+}
+
+// APICall performs an API call.
+//
+// This is not intended for general use; it is exported for use in conjunction
+// with WithAPICallFunc.
+func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
+	return internal.Call(ctx, service, method, in, out)
+}
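A short, hedged sketch of the per-request pattern the doc comments above describe; the handler body is illustrative and only uses APIs defined in this package:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/appengine"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// NewContext derives an App Engine context from the in-flight request;
	// it is what the service packages (datastore, log, ...) expect.
	ctx := appengine.NewContext(r)
	fmt.Fprintf(w, "app %s, version %s\n", appengine.AppID(ctx), appengine.VersionID(ctx))
}

func main() {
	http.HandleFunc("/", handler)
	appengine.Main() // never returns
}
```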
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 00000000..f4b645aa
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,20 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import (
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// BackgroundContext returns a context not associated with a request.
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
+func BackgroundContext() context.Context {
+	return internal.BackgroundContext()
+}
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 00000000..16d0772e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+	"fmt"
+
+	"google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+	callErr, ok := err.(*internal.CallError)
+	return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+	s, n := "", 0
+	for _, e := range m {
+		if e != nil {
+			if n == 0 {
+				s = e.Error()
+			}
+			n++
+		}
+	}
+	switch n {
+	case 0:
+		return "(0 errors)"
+	case 1:
+		return s
+	case 2:
+		return s + " (and 1 other error)"
+	}
+	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
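+
+// Illustrative sketch, not part of the upstream file: callers typically
+// unwrap a MultiError returned by a batch operation to find which elements
+// failed. batchOp below is a placeholder for any batch API.
+//
+//	if err := batchOp(c, keys, dst); err != nil {
+//		if me, ok := err.(MultiError); ok {
+//			for i, e := range me {
+//				if e != nil {
+//					// keys[i] failed with e; other elements may have succeeded.
+//				}
+//			}
+//		}
+//	}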
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 00000000..b8dcf8f3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+	"time"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/app_identity"
+	modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c context.Context) string { return internal.AppID(c) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c context.Context) string {
+	return internal.DefaultVersionHostname(c)
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c context.Context) string {
+	return internal.ModuleName(c)
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+	req := &modpb.GetHostnameRequest{}
+	if module != "" {
+		req.Module = &module
+	}
+	if version != "" {
+		req.Version = &version
+	}
+	if instance != "" {
+		req.Instance = &instance
+	}
+	res := &modpb.GetHostnameResponse{}
+	if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+		return "", err
+	}
+	return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of the service account of this application. The token will expire
+// after the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+	req := &pb.GetAccessTokenRequest{Scope: scopes}
+	res := &pb.GetAccessTokenResponse{}
+
+	err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+	return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
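+
+// Illustrative sketch, not part of the upstream file: a typical call requests
+// a token for one or more OAuth2 scopes; the scope below is only an example.
+//
+//	tok, expiry, err := AccessToken(c, "https://www.googleapis.com/auth/cloud-platform")
+//	if err == nil {
+//		// Use tok as a Bearer token until expiry.
+//	}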
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+	KeyName string
+	Data    []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+	req := &pb.GetPublicCertificateForAppRequest{}
+	res := &pb.GetPublicCertificateForAppResponse{}
+	if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+		return nil, err
+	}
+	var cs []Certificate
+	for _, pc := range res.PublicCertificateList {
+		cs = append(cs, Certificate{
+			KeyName: pc.GetKeyName(),
+			Data:    []byte(pc.GetX509CertificatePem()),
+		})
+	}
+	return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+	req := &pb.GetServiceAccountNameRequest{}
+	res := &pb.GetServiceAccountNameResponse{}
+
+	err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+	if err != nil {
+		return "", err
+	}
+	return res.GetServiceAccountName(), err
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
+	req := &pb.SignForAppRequest{BytesToSign: bytes}
+	res := &pb.SignForAppResponse{}
+
+	if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
+		return "", nil, err
+	}
+	return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+	internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 00000000..efee0609
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,677 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+
+	basepb "google.golang.org/appengine/internal/base"
+	logpb "google.golang.org/appengine/internal/log"
+	remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+	apiPath             = "/rpc_http"
+	defaultTicketSuffix = "/default.20150612t184001.0"
+)
+
+var (
+	// Incoming headers.
+	ticketHeader       = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+	dapperHeader       = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+	traceHeader        = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+	curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+	userIPHeader       = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+	remoteAddrHeader   = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+	// Outgoing headers.
+	apiEndpointHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+	apiEndpointHeaderValue = []string{"app-engine-apis"}
+	apiMethodHeader        = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+	apiMethodHeaderValue   = []string{"/VMRemoteAPI.CallRemoteAPI"}
+	apiDeadlineHeader      = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+	apiContentType         = http.CanonicalHeaderKey("Content-Type")
+	apiContentTypeValue    = []string{"application/octet-stream"}
+	logFlushHeader         = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+	apiHTTPClient = &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			Dial:  limitDial,
+		},
+	}
+
+	defaultTicketOnce sync.Once
+	defaultTicket     string
+)
+
+func apiURL() *url.URL {
+	host, port := "appengine.googleapis.internal", "10001"
+	if h := os.Getenv("API_HOST"); h != "" {
+		host = h
+	}
+	if p := os.Getenv("API_PORT"); p != "" {
+		port = p
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   host + ":" + port,
+		Path:   apiPath,
+	}
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+	c := &context{
+		req:       r,
+		outHeader: w.Header(),
+		apiURL:    apiURL(),
+	}
+	stopFlushing := make(chan int)
+
+	ctxs.Lock()
+	ctxs.m[r] = c
+	ctxs.Unlock()
+	defer func() {
+		ctxs.Lock()
+		delete(ctxs.m, r)
+		ctxs.Unlock()
+	}()
+
+	// Patch up RemoteAddr so it looks reasonable.
+	if addr := r.Header.Get(userIPHeader); addr != "" {
+		r.RemoteAddr = addr
+	} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+		r.RemoteAddr = addr
+	} else {
+		// Should not normally reach here, but pick a sensible default anyway.
+		r.RemoteAddr = "127.0.0.1"
+	}
+	// The address in the headers will most likely be of these forms:
+	//	123.123.123.123
+	//	2001:db8::1
+	// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+	if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+		// Assume the remote address is only a host; add a default port.
+		r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+	}
+
+	// Start goroutine responsible for flushing app logs.
+	// This is done after adding c to ctx.m (and stopped before removing it)
+	// because flushing logs requires making an API call.
+	go c.logFlusher(stopFlushing)
+
+	executeRequestSafely(c, r)
+	c.outHeader = nil // make sure header changes aren't respected any more
+
+	stopFlushing <- 1 // any logging beyond this point will be dropped
+
+	// Flush any pending logs asynchronously.
+	c.pendingLogs.Lock()
+	flushes := c.pendingLogs.flushes
+	if len(c.pendingLogs.lines) > 0 {
+		flushes++
+	}
+	c.pendingLogs.Unlock()
+	go c.flushLog(false)
+	w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+	// Avoid nil Write call if c.Write is never called.
+	if c.outCode != 0 {
+		w.WriteHeader(c.outCode)
+	}
+	if c.outBody != nil {
+		w.Write(c.outBody)
+	}
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+	defer func() {
+		if x := recover(); x != nil {
+			logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+			c.outCode = 500
+		}
+	}()
+
+	http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+	buf := make([]byte, 16<<10) // 16 KB should be plenty
+	buf = buf[:runtime.Stack(buf, false)]
+
+	// Remove the first few stack frames:
+	//   this func
+	//   the recover closure in the caller
+	// That will root the stack trace at the site of the panic.
+	const (
+		skipStart  = "internal.renderPanic"
+		skipFrames = 2
+	)
+	start := bytes.Index(buf, []byte(skipStart))
+	p := start
+	for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+		p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+		if p < 0 {
+			break
+		}
+	}
+	if p >= 0 {
+		// buf[start:p+1] is the block to remove.
+		// Copy buf[p+1:] over buf[start:] and shrink buf.
+		copy(buf[start:], buf[p+1:])
+		buf = buf[:len(buf)-(p+1-start)]
+	}
+
+	// Add panic heading.
+	head := fmt.Sprintf("panic: %v\n\n", x)
+	if len(head) > len(buf) {
+		// Extremely unlikely to happen.
+		return head
+	}
+	copy(buf[len(head):], buf)
+	copy(buf, head)
+
+	return string(buf)
+}
+
+var ctxs = struct {
+	sync.Mutex
+	m  map[*http.Request]*context
+	bg *context // background context, lazily initialized
+	// dec is used by tests to decorate the netcontext.Context returned
+	// for a given request. This allows tests to add overrides (such as
+	// WithAppIDOverride) to the context. The map is nil outside tests.
+	dec map[*http.Request]func(netcontext.Context) netcontext.Context
+}{
+	m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+	req *http.Request
+
+	outCode   int
+	outHeader http.Header
+	outBody   []byte
+
+	pendingLogs struct {
+		sync.Mutex
+		lines   []*logpb.UserAppLogLine
+		flushes int
+	}
+
+	apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) *context {
+	c, _ := ctx.Value(&contextKey).(*context)
+	return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+	ctx := netcontext.WithValue(parent, &contextKey, c)
+	if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+		ctx = withNamespace(ctx, ns)
+	}
+	return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+	return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+	if c := fromContext(ctx); c != nil {
+		return c.req.Header
+	}
+	return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+	ctxs.Lock()
+	c := ctxs.m[req]
+	d := ctxs.dec[req]
+	ctxs.Unlock()
+
+	if d != nil {
+		parent = d(parent)
+	}
+
+	if c == nil {
+		// Someone passed in an http.Request that is not in-flight.
+		// We panic here rather than panicking at a later point
+		// so that stack traces will be more sensible.
+		log.Panic("appengine: NewContext passed an unknown http.Request")
+	}
+	return withContext(parent, c)
+}
+
+// DefaultTicket returns a ticket used for the background context or dev_appserver.
+func DefaultTicket() string {
+	defaultTicketOnce.Do(func() {
+		if IsDevAppServer() {
+			defaultTicket = "testapp" + defaultTicketSuffix
+			return
+		}
+		appID := partitionlessAppID()
+		escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+		majVersion := VersionID(nil)
+		if i := strings.Index(majVersion, "."); i > 0 {
+			majVersion = majVersion[:i]
+		}
+		defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+	})
+	return defaultTicket
+}
+
+func BackgroundContext() netcontext.Context {
+	ctxs.Lock()
+	defer ctxs.Unlock()
+
+	if ctxs.bg != nil {
+		return toContext(ctxs.bg)
+	}
+
+	// Compute background security ticket.
+	ticket := DefaultTicket()
+
+	ctxs.bg = &context{
+		req: &http.Request{
+			Header: http.Header{
+				ticketHeader: []string{ticket},
+			},
+		},
+		apiURL: apiURL(),
+	}
+
+	// TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+	go ctxs.bg.logFlusher(make(chan int))
+
+	return toContext(ctxs.bg)
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by the aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {
+	c := &context{
+		req:    req,
+		apiURL: apiURL,
+	}
+	ctxs.Lock()
+	defer ctxs.Unlock()
+	if _, ok := ctxs.m[req]; ok {
+		log.Panic("req already associated with context")
+	}
+	if _, ok := ctxs.dec[req]; ok {
+		log.Panic("req already associated with context")
+	}
+	if ctxs.dec == nil {
+		ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
+	}
+	ctxs.m[req] = c
+	ctxs.dec[req] = decorate
+
+	return func() {
+		ctxs.Lock()
+		delete(ctxs.m, req)
+		delete(ctxs.dec, req)
+		ctxs.Unlock()
+	}
+}
+
+var errTimeout = &CallError{
+	Detail:  "Deadline exceeded",
+	Code:    int32(remotepb.RpcError_CANCELLED),
+	Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+	switch {
+	case status >= 100 && status <= 199:
+		return false
+	case status == 204:
+		return false
+	case status == 304:
+		return false
+	}
+	return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+	if c.outCode == 0 {
+		c.WriteHeader(http.StatusOK)
+	}
+	if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+		return 0, http.ErrBodyNotAllowed
+	}
+	c.outBody = append(c.outBody, b...)
+	return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+	if c.outCode != 0 {
+		logf(c, 3, "WriteHeader called multiple times on request.") // error level
+		return
+	}
+	c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+	hreq := &http.Request{
+		Method: "POST",
+		URL:    c.apiURL,
+		Header: http.Header{
+			apiEndpointHeader: apiEndpointHeaderValue,
+			apiMethodHeader:   apiMethodHeaderValue,
+			apiContentType:    apiContentTypeValue,
+			apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+		},
+		Body:          ioutil.NopCloser(bytes.NewReader(body)),
+		ContentLength: int64(len(body)),
+		Host:          c.apiURL.Host,
+	}
+	if info := c.req.Header.Get(dapperHeader); info != "" {
+		hreq.Header.Set(dapperHeader, info)
+	}
+	if info := c.req.Header.Get(traceHeader); info != "" {
+		hreq.Header.Set(traceHeader, info)
+	}
+
+	tr := apiHTTPClient.Transport.(*http.Transport)
+
+	var timedOut int32 // atomic; set to 1 if timed out
+	t := time.AfterFunc(timeout, func() {
+		atomic.StoreInt32(&timedOut, 1)
+		tr.CancelRequest(hreq)
+	})
+	defer t.Stop()
+	defer func() {
+		// Check if timeout was exceeded.
+		if atomic.LoadInt32(&timedOut) != 0 {
+			err = errTimeout
+		}
+	}()
+
+	hresp, err := apiHTTPClient.Do(hreq)
+	if err != nil {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	defer hresp.Body.Close()
+	hrespBody, err := ioutil.ReadAll(hresp.Body)
+	if hresp.StatusCode != 200 {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	if err != nil {
+		return nil, &CallError{
+			Detail: fmt.Sprintf("service bridge response bad: %v", err),
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+	if ns := NamespaceFromContext(ctx); ns != "" {
+		if fn, ok := NamespaceMods[service]; ok {
+			fn(in, ns)
+		}
+	}
+
+	if f, ctx, ok := callOverrideFromContext(ctx); ok {
+		return f(ctx, service, method, in, out)
+	}
+
+	// Handle already-done contexts quickly.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	c := fromContext(ctx)
+	if c == nil {
+		// Give a good error message rather than a panic lower down.
+		return errNotAppEngineContext
+	}
+
+	// Apply transaction modifications if we're in a transaction.
+	if t := transactionFromContext(ctx); t != nil {
+		if t.finished {
+			return errors.New("transaction context has expired")
+		}
+		applyTransaction(in, &t.transaction)
+	}
+
+	// Default RPC timeout is 60s.
+	timeout := 60 * time.Second
+	if deadline, ok := ctx.Deadline(); ok {
+		timeout = deadline.Sub(time.Now())
+	}
+
+	data, err := proto.Marshal(in)
+	if err != nil {
+		return err
+	}
+
+	ticket := c.req.Header.Get(ticketHeader)
+	// Use a test ticket in the test environment.
+	if ticket == "" {
+		if appid := ctx.Value(&appIDOverrideKey); appid != nil {
+			ticket = appid.(string) + defaultTicketSuffix
+		}
+	}
+	// Fall back to the background ticket when the request ticket is not available in Flex or dev_appserver.
+	if ticket == "" {
+		ticket = DefaultTicket()
+	}
+	req := &remotepb.Request{
+		ServiceName: &service,
+		Method:      &method,
+		Request:     data,
+		RequestId:   &ticket,
+	}
+	hreqBody, err := proto.Marshal(req)
+	if err != nil {
+		return err
+	}
+
+	hrespBody, err := c.post(hreqBody, timeout)
+	if err != nil {
+		return err
+	}
+
+	res := &remotepb.Response{}
+	if err := proto.Unmarshal(hrespBody, res); err != nil {
+		return err
+	}
+	if res.RpcError != nil {
+		ce := &CallError{
+			Detail: res.RpcError.GetDetail(),
+			Code:   *res.RpcError.Code,
+		}
+		switch remotepb.RpcError_ErrorCode(ce.Code) {
+		case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+			ce.Timeout = true
+		}
+		return ce
+	}
+	if res.ApplicationError != nil {
+		return &APIError{
+			Service: *req.ServiceName,
+			Detail:  res.ApplicationError.GetDetail(),
+			Code:    *res.ApplicationError.Code,
+		}
+	}
+	if res.Exception != nil || res.JavaException != nil {
+		// This shouldn't happen, but let's be defensive.
+		return &CallError{
+			Detail: "service bridge returned exception",
+			Code:   int32(remotepb.RpcError_UNKNOWN),
+		}
+	}
+	return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+	return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+	// Truncate long log lines.
+	// TODO(dsymonds): Check if this is still necessary.
+	const lim = 8 << 10
+	if len(*ll.Message) > lim {
+		suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+		ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+	}
+
+	c.pendingLogs.Lock()
+	c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+	c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+	0: "DEBUG",
+	1: "INFO",
+	2: "WARNING",
+	3: "ERROR",
+	4: "CRITICAL",
+}
+
+func logf(c *context, level int64, format string, args ...interface{}) {
+	if c == nil {
+		panic("not an App Engine context")
+	}
+	s := fmt.Sprintf(format, args...)
+	s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+	c.addLogLine(&logpb.UserAppLogLine{
+		TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+		Level:         &level,
+		Message:       &s,
+	})
+	log.Print(logLevelName[level] + ": " + s)
+}
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+func (c *context) flushLog(force bool) (flushed bool) {
+	c.pendingLogs.Lock()
+	// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+	n, rem := 0, 30<<20
+	for ; n < len(c.pendingLogs.lines); n++ {
+		ll := c.pendingLogs.lines[n]
+		// Each log line will require about 3 bytes of overhead.
+		nb := proto.Size(ll) + 3
+		if nb > rem {
+			break
+		}
+		rem -= nb
+	}
+	lines := c.pendingLogs.lines[:n]
+	c.pendingLogs.lines = c.pendingLogs.lines[n:]
+	c.pendingLogs.Unlock()
+
+	if len(lines) == 0 && !force {
+		// Nothing to flush.
+		return false
+	}
+
+	rescueLogs := false
+	defer func() {
+		if rescueLogs {
+			c.pendingLogs.Lock()
+			c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+			c.pendingLogs.Unlock()
+		}
+	}()
+
+	buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+		LogLine: lines,
+	})
+	if err != nil {
+		log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+		rescueLogs = true
+		return false
+	}
+
+	req := &logpb.FlushRequest{
+		Logs: buf,
+	}
+	res := &basepb.VoidProto{}
+	c.pendingLogs.Lock()
+	c.pendingLogs.flushes++
+	c.pendingLogs.Unlock()
+	if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
+		log.Printf("internal.flushLog: Flush RPC: %v", err)
+		rescueLogs = true
+		return false
+	}
+	return true
+}
+
+const (
+	// Log flushing parameters.
+	flushInterval      = 1 * time.Second
+	forceFlushInterval = 60 * time.Second
+)
+
+func (c *context) logFlusher(stop <-chan int) {
+	lastFlush := time.Now()
+	tick := time.NewTicker(flushInterval)
+	for {
+		select {
+		case <-stop:
+			// Request finished.
+			tick.Stop()
+			return
+		case <-tick.C:
+			force := time.Now().Sub(lastFlush) > forceFlushInterval
+			if c.flushLog(force) {
+				lastFlush = time.Now()
+			}
+		}
+	}
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+	return toContext(&context{req: req})
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 00000000..952b6e66
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,165 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"time"
+
+	"appengine"
+	"appengine_internal"
+	basepb "appengine_internal/base"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+// fromContext returns the App Engine context or nil if ctx is not
+// derived from an App Engine context.
+func fromContext(ctx netcontext.Context) appengine.Context {
+	c, _ := ctx.Value(&contextKey).(appengine.Context)
+	return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) {
+	c := fromContext(ctx)
+	if c == nil {
+		return nil, errNotAppEngineContext
+	}
+	return c, nil
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+	ctx := netcontext.WithValue(parent, &contextKey, c)
+
+	s := &basepb.StringProto{}
+	c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+	if ns := s.GetValue(); ns != "" {
+		ctx = NamespacedContext(ctx, ns)
+	}
+
+	return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+	if c := fromContext(ctx); c != nil {
+		if req, ok := c.Request().(*http.Request); ok {
+			return req.Header
+		}
+	}
+	return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+	c := appengine.NewContext(req)
+	return withContext(parent, c)
+}
+
+type testingContext struct {
+	appengine.Context
+
+	req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+	if service == "__go__" && method == "GetNamespace" {
+		return nil
+	}
+	return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+	return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+	if ns := NamespaceFromContext(ctx); ns != "" {
+		if fn, ok := NamespaceMods[service]; ok {
+			fn(in, ns)
+		}
+	}
+
+	if f, ctx, ok := callOverrideFromContext(ctx); ok {
+		return f(ctx, service, method, in, out)
+	}
+
+	// Handle already-done contexts quickly.
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	c := fromContext(ctx)
+	if c == nil {
+		// Give a good error message rather than a panic lower down.
+		return errNotAppEngineContext
+	}
+
+	// Apply transaction modifications if we're in a transaction.
+	if t := transactionFromContext(ctx); t != nil {
+		if t.finished {
+			return errors.New("transaction context has expired")
+		}
+		applyTransaction(in, &t.transaction)
+	}
+
+	var opts *appengine_internal.CallOptions
+	if d, ok := ctx.Deadline(); ok {
+		opts = &appengine_internal.CallOptions{
+			Timeout: d.Sub(time.Now()),
+		}
+	}
+
+	err := c.Call(service, method, in, out, opts)
+	switch v := err.(type) {
+	case *appengine_internal.APIError:
+		return &APIError{
+			Service: v.Service,
+			Detail:  v.Detail,
+			Code:    v.Code,
+		}
+	case *appengine_internal.CallError:
+		return &CallError{
+			Detail:  v.Detail,
+			Code:    v.Code,
+			Timeout: v.Timeout,
+		}
+	}
+	return err
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+	panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+	var fn func(format string, args ...interface{})
+	switch level {
+	case 0:
+		fn = c.Debugf
+	case 1:
+		fn = c.Infof
+	case 2:
+		fn = c.Warningf
+	case 3:
+		fn = c.Errorf
+	case 4:
+		fn = c.Criticalf
+	default:
+		// This shouldn't happen.
+		fn = c.Criticalf
+	}
+	fn(format, args...)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 00000000..e0c0b214
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,123 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"errors"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+)
+
+var errNotAppEngineContext = errors.New("not an App Engine context")
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+	// We avoid appending to any existing call override
+	// so we don't risk overwriting a popped stack below.
+	var cofs []CallOverrideFunc
+	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+		cofs = append(cofs, uf...)
+	}
+	cofs = append(cofs, f)
+	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+	if len(cofs) == 0 {
+		return nil, nil, false
+	}
+	// We found a list of overrides; grab the last, and reconstitute a
+	// context that will hide it.
+	f := cofs[len(cofs)-1]
+	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+	return f, ctx, true
+}
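+
+// Illustrative sketch, not part of the upstream file: overrides form a stack.
+// WithCallOverride pushes a function and callOverrideFromContext pops the most
+// recently pushed one, returning a context that hides it so the override can
+// delegate to earlier overrides (or the real implementation) via Call.
+//
+//	ctx = WithCallOverride(ctx, inner) // pushed first
+//	ctx = WithCallOverride(ctx, outer) // pushed second
+//	f, ctx, ok := callOverrideFromContext(ctx)
+//	// ok == true, f == outer; the returned ctx still carries inner.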
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+	return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+	return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+	return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+	// If there's no namespace, return the empty string.
+	ns, _ := ctx.Value(&namespaceKey).(string)
+	return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+	if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+		return id
+	}
+	return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+	if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+		f(level, format, args...)
+		return
+	}
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	logf(c, level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+	return withNamespace(ctx, namespace)
+}
+
+// SetTestEnv sets the environment variables used to test the background ticket in Flex.
+func SetTestEnv() func() {
+	var environ = []struct {
+		key, value string
+	}{
+		{"GAE_LONG_APP_ID", "my-app-id"},
+		{"GAE_MINOR_VERSION", "067924799508853122"},
+		{"GAE_MODULE_INSTANCE", "0"},
+		{"GAE_MODULE_NAME", "default"},
+		{"GAE_MODULE_VERSION", "20150612t184001"},
+	}
+
+	for _, v := range environ {
+		old := os.Getenv(v.key)
+		os.Setenv(v.key, v.value)
+		v.value = old
+	}
+	return func() { // Restore old environment after the test completes.
+		for _, v := range environ {
+			if v.value == "" {
+				os.Unsetenv(v.key)
+				continue
+			}
+			os.Setenv(v.key, v.value)
+		}
+	}
+}
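+
+// Illustrative sketch, not part of the upstream file: tests typically install
+// the fake environment and restore it when they finish.
+//
+//	restore := SetTestEnv()
+//	defer restore()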
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 00000000..11df8c07
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+	if i := strings.Index(appid, "~"); i != -1 {
+		partition, appid = appid[:i], appid[i+1:]
+	}
+	if i := strings.Index(appid, ":"); i != -1 {
+		domain, appid = appid[:i], appid[i+1:]
+	}
+	return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+	_, dom, dis := parseFullAppID(fullAppID)
+	if dom != "" {
+		return dom + ":" + dis
+	}
+	return dis
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 00000000..87d9701b
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,296 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+	AppIdentityServiceError
+	SignForAppRequest
+	SignForAppResponse
+	GetPublicCertificateForAppRequest
+	PublicCertificate
+	GetPublicCertificateForAppResponse
+	GetServiceAccountNameRequest
+	GetServiceAccountNameResponse
+	GetAccessTokenRequest
+	GetAccessTokenResponse
+	GetDefaultGcsBucketNameRequest
+	GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+	AppIdentityServiceError_SUCCESS           AppIdentityServiceError_ErrorCode = 0
+	AppIdentityServiceError_UNKNOWN_SCOPE     AppIdentityServiceError_ErrorCode = 9
+	AppIdentityServiceError_BLOB_TOO_LARGE    AppIdentityServiceError_ErrorCode = 1000
+	AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+	AppIdentityServiceError_NOT_A_VALID_APP   AppIdentityServiceError_ErrorCode = 1002
+	AppIdentityServiceError_UNKNOWN_ERROR     AppIdentityServiceError_ErrorCode = 1003
+	AppIdentityServiceError_NOT_ALLOWED       AppIdentityServiceError_ErrorCode = 1005
+	AppIdentityServiceError_NOT_IMPLEMENTED   AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+	0:    "SUCCESS",
+	9:    "UNKNOWN_SCOPE",
+	1000: "BLOB_TOO_LARGE",
+	1001: "DEADLINE_EXCEEDED",
+	1002: "NOT_A_VALID_APP",
+	1003: "UNKNOWN_ERROR",
+	1005: "NOT_ALLOWED",
+	1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+	"SUCCESS":           0,
+	"UNKNOWN_SCOPE":     9,
+	"BLOB_TOO_LARGE":    1000,
+	"DEADLINE_EXCEEDED": 1001,
+	"NOT_A_VALID_APP":   1002,
+	"UNKNOWN_ERROR":     1003,
+	"NOT_ALLOWED":       1005,
+	"NOT_IMPLEMENTED":   1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+	p := new(AppIdentityServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+	return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = AppIdentityServiceError_ErrorCode(value)
+	return nil
+}
+
+type AppIdentityServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset()         { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage()    {}
+
+type SignForAppRequest struct {
+	BytesToSign      []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset()         { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage()    {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+	if m != nil {
+		return m.BytesToSign
+	}
+	return nil
+}
+
+type SignForAppResponse struct {
+	KeyName          *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+	SignatureBytes   []byte  `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset()         { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage()    {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+	if m != nil && m.KeyName != nil {
+		return *m.KeyName
+	}
+	return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+	if m != nil {
+		return m.SignatureBytes
+	}
+	return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset()         { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage()    {}
+
+type PublicCertificate struct {
+	KeyName            *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+	X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+	XXX_unrecognized   []byte  `json:"-"`
+}
+
+func (m *PublicCertificate) Reset()         { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage()    {}
+
+func (m *PublicCertificate) GetKeyName() string {
+	if m != nil && m.KeyName != nil {
+		return *m.KeyName
+	}
+	return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+	if m != nil && m.X509CertificatePem != nil {
+		return *m.X509CertificatePem
+	}
+	return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+	PublicCertificateList      []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+	MaxClientCacheTimeInSecond *int64               `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+	XXX_unrecognized           []byte               `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset()         { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage()    {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+	if m != nil {
+		return m.PublicCertificateList
+	}
+	return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+	if m != nil && m.MaxClientCacheTimeInSecond != nil {
+		return *m.MaxClientCacheTimeInSecond
+	}
+	return 0
+}
+
+type GetServiceAccountNameRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset()         { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage()    {}
+
+type GetServiceAccountNameResponse struct {
+	ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+	XXX_unrecognized   []byte  `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset()         { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage()    {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+	if m != nil && m.ServiceAccountName != nil {
+		return *m.ServiceAccountName
+	}
+	return ""
+}
+
+type GetAccessTokenRequest struct {
+	Scope              []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+	ServiceAccountId   *int64   `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+	ServiceAccountName *string  `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+	XXX_unrecognized   []byte   `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset()         { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage()    {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+	if m != nil {
+		return m.Scope
+	}
+	return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+	if m != nil && m.ServiceAccountId != nil {
+		return *m.ServiceAccountId
+	}
+	return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+	if m != nil && m.ServiceAccountName != nil {
+		return *m.ServiceAccountName
+	}
+	return ""
+}
+
+type GetAccessTokenResponse struct {
+	AccessToken      *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+	ExpirationTime   *int64  `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset()         { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage()    {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+	if m != nil && m.AccessToken != nil {
+		return *m.AccessToken
+	}
+	return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+	if m != nil && m.ExpirationTime != nil {
+		return *m.ExpirationTime
+	}
+	return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset()         { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage()    {}
+
+type GetDefaultGcsBucketNameResponse struct {
+	DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+	XXX_unrecognized     []byte  `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset()         { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage()    {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+	if m != nil && m.DefaultGcsBucketName != nil {
+		return *m.DefaultGcsBucketName
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 00000000..19610ca5
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+  enum ErrorCode {
+    SUCCESS = 0;
+    UNKNOWN_SCOPE = 9;
+    BLOB_TOO_LARGE = 1000;
+    DEADLINE_EXCEEDED = 1001;
+    NOT_A_VALID_APP = 1002;
+    UNKNOWN_ERROR = 1003;
+    NOT_ALLOWED = 1005;
+    NOT_IMPLEMENTED = 1006;
+  }
+}
+
+message SignForAppRequest {
+  optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+  optional string key_name = 1;
+  optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+  optional string key_name = 1;
+  optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+  repeated PublicCertificate public_certificate_list = 1;
+  optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+  optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+  repeated string scope = 1;
+  optional int64 service_account_id = 2;
+  optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+  optional string access_token = 1;
+  optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+  optional string default_gcs_bucket_name = 1;
+}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 00000000..36a19565
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,133 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+	StringProto
+	Integer32Proto
+	Integer64Proto
+	BoolProto
+	DoubleProto
+	BytesProto
+	VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type StringProto struct {
+	Value            *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *StringProto) Reset()         { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage()    {}
+
+func (m *StringProto) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+type Integer32Proto struct {
+	Value            *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset()         { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage()    {}
+
+func (m *Integer32Proto) GetValue() int32 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+type Integer64Proto struct {
+	Value            *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset()         { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage()    {}
+
+func (m *Integer64Proto) GetValue() int64 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+type BoolProto struct {
+	Value            *bool  `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset()         { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage()    {}
+
+func (m *BoolProto) GetValue() bool {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return false
+}
+
+type DoubleProto struct {
+	Value            *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *DoubleProto) Reset()         { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage()    {}
+
+func (m *DoubleProto) GetValue() float64 {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return 0
+}
+
+type BytesProto struct {
+	Value            []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset()         { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage()    {}
+
+func (m *BytesProto) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type VoidProto struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset()         { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage()    {}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 00000000..56cd7a3c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+  required string value = 1;
+}
+
+message Integer32Proto {
+  required int32 value = 1;
+}
+
+message Integer64Proto {
+  required int64 value = 1;
+}
+
+message BoolProto {
+  required bool value = 1;
+}
+
+message DoubleProto {
+  required double value = 1;
+}
+
+message BytesProto {
+  required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 00000000..8613cb73
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2778 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+	Action
+	PropertyValue
+	Property
+	Path
+	Reference
+	User
+	EntityProto
+	CompositeProperty
+	Index
+	CompositeIndex
+	IndexPostfix
+	IndexPosition
+	Snapshot
+	InternalHeader
+	Transaction
+	Query
+	CompiledQuery
+	CompiledCursor
+	Cursor
+	Error
+	Cost
+	GetRequest
+	GetResponse
+	PutRequest
+	PutResponse
+	TouchRequest
+	TouchResponse
+	DeleteRequest
+	DeleteResponse
+	NextRequest
+	QueryResult
+	AllocateIdsRequest
+	AllocateIdsResponse
+	CompositeIndices
+	AddActionsRequest
+	AddActionsResponse
+	BeginTransactionRequest
+	CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+	Property_NO_MEANING       Property_Meaning = 0
+	Property_BLOB             Property_Meaning = 14
+	Property_TEXT             Property_Meaning = 15
+	Property_BYTESTRING       Property_Meaning = 16
+	Property_ATOM_CATEGORY    Property_Meaning = 1
+	Property_ATOM_LINK        Property_Meaning = 2
+	Property_ATOM_TITLE       Property_Meaning = 3
+	Property_ATOM_CONTENT     Property_Meaning = 4
+	Property_ATOM_SUMMARY     Property_Meaning = 5
+	Property_ATOM_AUTHOR      Property_Meaning = 6
+	Property_GD_WHEN          Property_Meaning = 7
+	Property_GD_EMAIL         Property_Meaning = 8
+	Property_GEORSS_POINT     Property_Meaning = 9
+	Property_GD_IM            Property_Meaning = 10
+	Property_GD_PHONENUMBER   Property_Meaning = 11
+	Property_GD_POSTALADDRESS Property_Meaning = 12
+	Property_GD_RATING        Property_Meaning = 13
+	Property_BLOBKEY          Property_Meaning = 17
+	Property_ENTITY_PROTO     Property_Meaning = 19
+	Property_INDEX_VALUE      Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+	0:  "NO_MEANING",
+	14: "BLOB",
+	15: "TEXT",
+	16: "BYTESTRING",
+	1:  "ATOM_CATEGORY",
+	2:  "ATOM_LINK",
+	3:  "ATOM_TITLE",
+	4:  "ATOM_CONTENT",
+	5:  "ATOM_SUMMARY",
+	6:  "ATOM_AUTHOR",
+	7:  "GD_WHEN",
+	8:  "GD_EMAIL",
+	9:  "GEORSS_POINT",
+	10: "GD_IM",
+	11: "GD_PHONENUMBER",
+	12: "GD_POSTALADDRESS",
+	13: "GD_RATING",
+	17: "BLOBKEY",
+	19: "ENTITY_PROTO",
+	18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+	"NO_MEANING":       0,
+	"BLOB":             14,
+	"TEXT":             15,
+	"BYTESTRING":       16,
+	"ATOM_CATEGORY":    1,
+	"ATOM_LINK":        2,
+	"ATOM_TITLE":       3,
+	"ATOM_CONTENT":     4,
+	"ATOM_SUMMARY":     5,
+	"ATOM_AUTHOR":      6,
+	"GD_WHEN":          7,
+	"GD_EMAIL":         8,
+	"GEORSS_POINT":     9,
+	"GD_IM":            10,
+	"GD_PHONENUMBER":   11,
+	"GD_POSTALADDRESS": 12,
+	"GD_RATING":        13,
+	"BLOBKEY":          17,
+	"ENTITY_PROTO":     19,
+	"INDEX_VALUE":      18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+	p := new(Property_Meaning)
+	*p = x
+	return p
+}
+func (x Property_Meaning) String() string {
+	return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+	if err != nil {
+		return err
+	}
+	*x = Property_Meaning(value)
+	return nil
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+	Property_HTML Property_FtsTokenizationOption = 1
+	Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+	1: "HTML",
+	2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+	"HTML": 1,
+	"ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+	p := new(Property_FtsTokenizationOption)
+	*p = x
+	return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+	return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+	if err != nil {
+		return err
+	}
+	*x = Property_FtsTokenizationOption(value)
+	return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+	EntityProto_GD_CONTACT EntityProto_Kind = 1
+	EntityProto_GD_EVENT   EntityProto_Kind = 2
+	EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+	1: "GD_CONTACT",
+	2: "GD_EVENT",
+	3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+	"GD_CONTACT": 1,
+	"GD_EVENT":   2,
+	"GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+	p := new(EntityProto_Kind)
+	*p = x
+	return p
+}
+func (x EntityProto_Kind) String() string {
+	return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+	if err != nil {
+		return err
+	}
+	*x = EntityProto_Kind(value)
+	return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+	Index_Property_ASCENDING  Index_Property_Direction = 1
+	Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+	1: "ASCENDING",
+	2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+	"ASCENDING":  1,
+	"DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+	p := new(Index_Property_Direction)
+	*p = x
+	return p
+}
+func (x Index_Property_Direction) String() string {
+	return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+	if err != nil {
+		return err
+	}
+	*x = Index_Property_Direction(value)
+	return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+	CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+	CompositeIndex_READ_WRITE CompositeIndex_State = 2
+	CompositeIndex_DELETED    CompositeIndex_State = 3
+	CompositeIndex_ERROR      CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+	1: "WRITE_ONLY",
+	2: "READ_WRITE",
+	3: "DELETED",
+	4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+	"WRITE_ONLY": 1,
+	"READ_WRITE": 2,
+	"DELETED":    3,
+	"ERROR":      4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+	p := new(CompositeIndex_State)
+	*p = x
+	return p
+}
+func (x CompositeIndex_State) String() string {
+	return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+	if err != nil {
+		return err
+	}
+	*x = CompositeIndex_State(value)
+	return nil
+}
+
+type Snapshot_Status int32
+
+const (
+	Snapshot_INACTIVE Snapshot_Status = 0
+	Snapshot_ACTIVE   Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+	0: "INACTIVE",
+	1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+	"INACTIVE": 0,
+	"ACTIVE":   1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+	p := new(Snapshot_Status)
+	*p = x
+	return p
+}
+func (x Snapshot_Status) String() string {
+	return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+	if err != nil {
+		return err
+	}
+	*x = Snapshot_Status(value)
+	return nil
+}
+
+type Query_Hint int32
+
+const (
+	Query_ORDER_FIRST    Query_Hint = 1
+	Query_ANCESTOR_FIRST Query_Hint = 2
+	Query_FILTER_FIRST   Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+	1: "ORDER_FIRST",
+	2: "ANCESTOR_FIRST",
+	3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+	"ORDER_FIRST":    1,
+	"ANCESTOR_FIRST": 2,
+	"FILTER_FIRST":   3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+	p := new(Query_Hint)
+	*p = x
+	return p
+}
+func (x Query_Hint) String() string {
+	return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+	if err != nil {
+		return err
+	}
+	*x = Query_Hint(value)
+	return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+	Query_Filter_LESS_THAN             Query_Filter_Operator = 1
+	Query_Filter_LESS_THAN_OR_EQUAL    Query_Filter_Operator = 2
+	Query_Filter_GREATER_THAN          Query_Filter_Operator = 3
+	Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+	Query_Filter_EQUAL                 Query_Filter_Operator = 5
+	Query_Filter_IN                    Query_Filter_Operator = 6
+	Query_Filter_EXISTS                Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+	1: "LESS_THAN",
+	2: "LESS_THAN_OR_EQUAL",
+	3: "GREATER_THAN",
+	4: "GREATER_THAN_OR_EQUAL",
+	5: "EQUAL",
+	6: "IN",
+	7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+	"LESS_THAN":             1,
+	"LESS_THAN_OR_EQUAL":    2,
+	"GREATER_THAN":          3,
+	"GREATER_THAN_OR_EQUAL": 4,
+	"EQUAL":                 5,
+	"IN":                    6,
+	"EXISTS":                7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+	p := new(Query_Filter_Operator)
+	*p = x
+	return p
+}
+func (x Query_Filter_Operator) String() string {
+	return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+	if err != nil {
+		return err
+	}
+	*x = Query_Filter_Operator(value)
+	return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+	Query_Order_ASCENDING  Query_Order_Direction = 1
+	Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+	1: "ASCENDING",
+	2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+	"ASCENDING":  1,
+	"DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+	p := new(Query_Order_Direction)
+	*p = x
+	return p
+}
+func (x Query_Order_Direction) String() string {
+	return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+	if err != nil {
+		return err
+	}
+	*x = Query_Order_Direction(value)
+	return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+	Error_BAD_REQUEST                  Error_ErrorCode = 1
+	Error_CONCURRENT_TRANSACTION       Error_ErrorCode = 2
+	Error_INTERNAL_ERROR               Error_ErrorCode = 3
+	Error_NEED_INDEX                   Error_ErrorCode = 4
+	Error_TIMEOUT                      Error_ErrorCode = 5
+	Error_PERMISSION_DENIED            Error_ErrorCode = 6
+	Error_BIGTABLE_ERROR               Error_ErrorCode = 7
+	Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+	Error_CAPABILITY_DISABLED          Error_ErrorCode = 9
+	Error_TRY_ALTERNATE_BACKEND        Error_ErrorCode = 10
+	Error_SAFE_TIME_TOO_OLD            Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+	1:  "BAD_REQUEST",
+	2:  "CONCURRENT_TRANSACTION",
+	3:  "INTERNAL_ERROR",
+	4:  "NEED_INDEX",
+	5:  "TIMEOUT",
+	6:  "PERMISSION_DENIED",
+	7:  "BIGTABLE_ERROR",
+	8:  "COMMITTED_BUT_STILL_APPLYING",
+	9:  "CAPABILITY_DISABLED",
+	10: "TRY_ALTERNATE_BACKEND",
+	11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+	"BAD_REQUEST":                  1,
+	"CONCURRENT_TRANSACTION":       2,
+	"INTERNAL_ERROR":               3,
+	"NEED_INDEX":                   4,
+	"TIMEOUT":                      5,
+	"PERMISSION_DENIED":            6,
+	"BIGTABLE_ERROR":               7,
+	"COMMITTED_BUT_STILL_APPLYING": 8,
+	"CAPABILITY_DISABLED":          9,
+	"TRY_ALTERNATE_BACKEND":        10,
+	"SAFE_TIME_TOO_OLD":            11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+	p := new(Error_ErrorCode)
+	*p = x
+	return p
+}
+func (x Error_ErrorCode) String() string {
+	return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = Error_ErrorCode(value)
+	return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+	PutRequest_CURRENT    PutRequest_AutoIdPolicy = 0
+	PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+	0: "CURRENT",
+	1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+	"CURRENT":    0,
+	"SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+	p := new(PutRequest_AutoIdPolicy)
+	*p = x
+	return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+	return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+	if err != nil {
+		return err
+	}
+	*x = PutRequest_AutoIdPolicy(value)
+	return nil
+}
+
+type Action struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset()         { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage()    {}
+
+type PropertyValue struct {
+	Int64Value       *int64                        `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+	BooleanValue     *bool                         `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+	StringValue      *string                       `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+	DoubleValue      *float64                      `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+	Pointvalue       *PropertyValue_PointValue     `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+	Uservalue        *PropertyValue_UserValue      `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+	Referencevalue   *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (m *PropertyValue) Reset()         { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage()    {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+	if m != nil && m.Int64Value != nil {
+		return *m.Int64Value
+	}
+	return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+	if m != nil && m.BooleanValue != nil {
+		return *m.BooleanValue
+	}
+	return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+	if m != nil && m.StringValue != nil {
+		return *m.StringValue
+	}
+	return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+	if m != nil {
+		return m.Pointvalue
+	}
+	return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+	if m != nil {
+		return m.Uservalue
+	}
+	return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+	if m != nil {
+		return m.Referencevalue
+	}
+	return nil
+}
+
+type PropertyValue_PointValue struct {
+	X                *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+	Y                *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset()         { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage()    {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+	if m != nil && m.X != nil {
+		return *m.X
+	}
+	return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+	if m != nil && m.Y != nil {
+		return *m.Y
+	}
+	return 0
+}
+
+type PropertyValue_UserValue struct {
+	Email             *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+	AuthDomain        *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+	Nickname          *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+	FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+	FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+	XXX_unrecognized  []byte  `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset()         { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage()    {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+	if m != nil && m.Email != nil {
+		return *m.Email
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+	if m != nil && m.AuthDomain != nil {
+		return *m.AuthDomain
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+	if m != nil && m.Nickname != nil {
+		return *m.Nickname
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+	if m != nil && m.FederatedIdentity != nil {
+		return *m.FederatedIdentity
+	}
+	return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+	if m != nil && m.FederatedProvider != nil {
+		return *m.FederatedProvider
+	}
+	return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+	App              *string                                     `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+	NameSpace        *string                                     `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+	Pathelement      []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+	XXX_unrecognized []byte                                      `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset()         { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage()    {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+	if m != nil {
+		return m.Pathelement
+	}
+	return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+	Type             *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+	Id               *int64  `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+	Name             *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+	*m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage()    {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+type Property struct {
+	Meaning               *Property_Meaning               `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+	MeaningUri            *string                         `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+	Name                  *string                         `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+	Value                 *PropertyValue                  `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+	Multiple              *bool                           `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+	Searchable            *bool                           `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+	FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+	Locale                *string                         `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+	XXX_unrecognized      []byte                          `json:"-"`
+}
+
+func (m *Property) Reset()         { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage()    {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+	if m != nil && m.Meaning != nil {
+		return *m.Meaning
+	}
+	return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+	if m != nil && m.MeaningUri != nil {
+		return *m.MeaningUri
+	}
+	return ""
+}
+
+func (m *Property) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *Property) GetMultiple() bool {
+	if m != nil && m.Multiple != nil {
+		return *m.Multiple
+	}
+	return false
+}
+
+func (m *Property) GetSearchable() bool {
+	if m != nil && m.Searchable != nil {
+		return *m.Searchable
+	}
+	return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+	if m != nil && m.FtsTokenizationOption != nil {
+		return *m.FtsTokenizationOption
+	}
+	return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+	if m != nil && m.Locale != nil {
+		return *m.Locale
+	}
+	return Default_Property_Locale
+}
+
+type Path struct {
+	Element          []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *Path) Reset()         { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage()    {}
+
+func (m *Path) GetElement() []*Path_Element {
+	if m != nil {
+		return m.Element
+	}
+	return nil
+}
+
+type Path_Element struct {
+	Type             *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+	Id               *int64  `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+	Name             *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Path_Element) Reset()         { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage()    {}
+
+func (m *Path_Element) GetType() string {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *Path_Element) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+type Reference struct {
+	App              *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+	NameSpace        *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+	Path             *Path   `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Reference) Reset()         { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage()    {}
+
+func (m *Reference) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *Reference) GetPath() *Path {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+type User struct {
+	Email             *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+	AuthDomain        *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+	Nickname          *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+	FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+	FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+	XXX_unrecognized  []byte  `json:"-"`
+}
+
+func (m *User) Reset()         { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage()    {}
+
+func (m *User) GetEmail() string {
+	if m != nil && m.Email != nil {
+		return *m.Email
+	}
+	return ""
+}
+
+func (m *User) GetAuthDomain() string {
+	if m != nil && m.AuthDomain != nil {
+		return *m.AuthDomain
+	}
+	return ""
+}
+
+func (m *User) GetNickname() string {
+	if m != nil && m.Nickname != nil {
+		return *m.Nickname
+	}
+	return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+	if m != nil && m.FederatedIdentity != nil {
+		return *m.FederatedIdentity
+	}
+	return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+	if m != nil && m.FederatedProvider != nil {
+		return *m.FederatedProvider
+	}
+	return ""
+}
+
+type EntityProto struct {
+	Key              *Reference        `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+	EntityGroup      *Path             `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+	Owner            *User             `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+	Kind             *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+	KindUri          *string           `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+	Property         []*Property       `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+	RawProperty      []*Property       `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+	Rank             *int32            `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *EntityProto) Reset()         { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage()    {}
+
+func (m *EntityProto) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+	if m != nil {
+		return m.EntityGroup
+	}
+	return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+	if m != nil {
+		return m.Owner
+	}
+	return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+	if m != nil && m.KindUri != nil {
+		return *m.KindUri
+	}
+	return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+	if m != nil {
+		return m.RawProperty
+	}
+	return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+	if m != nil && m.Rank != nil {
+		return *m.Rank
+	}
+	return 0
+}
+
+type CompositeProperty struct {
+	IndexId          *int64   `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+	Value            []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *CompositeProperty) Reset()         { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage()    {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+	if m != nil && m.IndexId != nil {
+		return *m.IndexId
+	}
+	return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type Index struct {
+	EntityType       *string           `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+	Ancestor         *bool             `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+	Property         []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *Index) Reset()         { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage()    {}
+
+func (m *Index) GetEntityType() string {
+	if m != nil && m.EntityType != nil {
+		return *m.EntityType
+	}
+	return ""
+}
+
+func (m *Index) GetAncestor() bool {
+	if m != nil && m.Ancestor != nil {
+		return *m.Ancestor
+	}
+	return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+type Index_Property struct {
+	Name             *string                   `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+	Direction        *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+	XXX_unrecognized []byte                    `json:"-"`
+}
+
+func (m *Index_Property) Reset()         { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage()    {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+	if m != nil && m.Direction != nil {
+		return *m.Direction
+	}
+	return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+	AppId             *string               `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	Id                *int64                `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+	Definition        *Index                `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+	State             *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+	OnlyUseIfRequired *bool                 `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+	XXX_unrecognized  []byte                `json:"-"`
+}
+
+func (m *CompositeIndex) Reset()         { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage()    {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+	if m != nil && m.Id != nil {
+		return *m.Id
+	}
+	return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+	if m != nil {
+		return m.Definition
+	}
+	return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+	if m != nil && m.State != nil {
+		return *m.State
+	}
+	return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+	if m != nil && m.OnlyUseIfRequired != nil {
+		return *m.OnlyUseIfRequired
+	}
+	return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+	IndexValue       []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+	Key              *Reference                 `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+	Before           *bool                      `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+	XXX_unrecognized []byte                     `json:"-"`
+}
+
+func (m *IndexPostfix) Reset()         { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage()    {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+	if m != nil {
+		return m.IndexValue
+	}
+	return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+	if m != nil && m.Before != nil {
+		return *m.Before
+	}
+	return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+	PropertyName     *string        `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+	Value            *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte         `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset()         { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage()    {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+	if m != nil && m.PropertyName != nil {
+		return *m.PropertyName
+	}
+	return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type IndexPosition struct {
+	Key              *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+	Before           *bool   `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *IndexPosition) Reset()         { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage()    {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+	if m != nil && m.Before != nil {
+		return *m.Before
+	}
+	return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+	Ts               *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+
+func (m *Snapshot) GetTs() int64 {
+	if m != nil && m.Ts != nil {
+		return *m.Ts
+	}
+	return 0
+}
+
+type InternalHeader struct {
+	Qos              *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *InternalHeader) Reset()         { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage()    {}
+
+func (m *InternalHeader) GetQos() string {
+	if m != nil && m.Qos != nil {
+		return *m.Qos
+	}
+	return ""
+}
+
+type Transaction struct {
+	Header           *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+	Handle           *uint64         `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+	App              *string         `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+	MarkChanges      *bool           `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *Transaction) Reset()         { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage()    {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+	if m != nil && m.Handle != nil {
+		return *m.Handle
+	}
+	return 0
+}
+
+func (m *Transaction) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+	if m != nil && m.MarkChanges != nil {
+		return *m.MarkChanges
+	}
+	return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+	Header              *InternalHeader   `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+	App                 *string           `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+	NameSpace           *string           `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+	Kind                *string           `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+	Ancestor            *Reference        `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+	Filter              []*Query_Filter   `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"`
+	SearchQuery         *string           `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+	Order               []*Query_Order    `protobuf:"group,9,rep,name=Order" json:"order,omitempty"`
+	Hint                *Query_Hint       `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+	Count               *int32            `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+	Offset              *int32            `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+	Limit               *int32            `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+	CompiledCursor      *CompiledCursor   `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+	EndCompiledCursor   *CompiledCursor   `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+	CompositeIndex      []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+	RequirePerfectPlan  *bool             `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+	KeysOnly            *bool             `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+	Transaction         *Transaction      `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+	Compile             *bool             `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+	FailoverMs          *int64            `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+	Strong              *bool             `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+	PropertyName        []string          `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+	GroupByPropertyName []string          `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+	Distinct            *bool             `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+	MinSafeTimeSeconds  *int64            `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+	SafeReplicaName     []string          `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+	PersistOffset       *bool             `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+	XXX_unrecognized    []byte            `json:"-"`
+}
+
+func (m *Query) Reset()         { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage()    {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *Query) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *Query) GetNameSpace() string {
+	if m != nil && m.NameSpace != nil {
+		return *m.NameSpace
+	}
+	return ""
+}
+
+func (m *Query) GetKind() string {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+	if m != nil {
+		return m.Ancestor
+	}
+	return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+	if m != nil {
+		return m.Filter
+	}
+	return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+	if m != nil && m.SearchQuery != nil {
+		return *m.SearchQuery
+	}
+	return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+	if m != nil {
+		return m.Order
+	}
+	return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+	if m != nil && m.Hint != nil {
+		return *m.Hint
+	}
+	return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *Query) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+	if m != nil {
+		return m.CompiledCursor
+	}
+	return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+	if m != nil {
+		return m.EndCompiledCursor
+	}
+	return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+	if m != nil {
+		return m.CompositeIndex
+	}
+	return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+	if m != nil && m.RequirePerfectPlan != nil {
+		return *m.RequirePerfectPlan
+	}
+	return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *Query) GetCompile() bool {
+	if m != nil && m.Compile != nil {
+		return *m.Compile
+	}
+	return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+	if m != nil && m.FailoverMs != nil {
+		return *m.FailoverMs
+	}
+	return 0
+}
+
+func (m *Query) GetStrong() bool {
+	if m != nil && m.Strong != nil {
+		return *m.Strong
+	}
+	return false
+}
+
+func (m *Query) GetPropertyName() []string {
+	if m != nil {
+		return m.PropertyName
+	}
+	return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+	if m != nil {
+		return m.GroupByPropertyName
+	}
+	return nil
+}
+
+func (m *Query) GetDistinct() bool {
+	if m != nil && m.Distinct != nil {
+		return *m.Distinct
+	}
+	return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+	if m != nil && m.MinSafeTimeSeconds != nil {
+		return *m.MinSafeTimeSeconds
+	}
+	return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+	if m != nil {
+		return m.SafeReplicaName
+	}
+	return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+	if m != nil && m.PersistOffset != nil {
+		return *m.PersistOffset
+	}
+	return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+	Op               *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+	Property         []*Property            `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *Query_Filter) Reset()         { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage()    {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+	if m != nil && m.Op != nil {
+		return *m.Op
+	}
+	return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+	if m != nil {
+		return m.Property
+	}
+	return nil
+}
+
+type Query_Order struct {
+	Property         *string                `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+	Direction        *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+	XXX_unrecognized []byte                 `json:"-"`
+}
+
+func (m *Query_Order) Reset()         { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage()    {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+	if m != nil && m.Property != nil {
+		return *m.Property
+	}
+	return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+	if m != nil && m.Direction != nil {
+		return *m.Direction
+	}
+	return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+	Primaryscan       *CompiledQuery_PrimaryScan     `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+	Mergejoinscan     []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+	IndexDef          *Index                         `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+	Offset            *int32                         `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+	Limit             *int32                         `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+	KeysOnly          *bool                          `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+	PropertyName      []string                       `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+	DistinctInfixSize *int32                         `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+	Entityfilter      *CompiledQuery_EntityFilter    `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+	XXX_unrecognized  []byte                         `json:"-"`
+}
+
+func (m *CompiledQuery) Reset()         { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage()    {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+	if m != nil {
+		return m.Primaryscan
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+	if m != nil {
+		return m.Mergejoinscan
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+	if m != nil {
+		return m.IndexDef
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+	if m != nil && m.Limit != nil {
+		return *m.Limit
+	}
+	return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+	if m != nil {
+		return m.PropertyName
+	}
+	return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+	if m != nil && m.DistinctInfixSize != nil {
+		return *m.DistinctInfixSize
+	}
+	return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+	if m != nil {
+		return m.Entityfilter
+	}
+	return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+	IndexName                  *string  `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+	StartKey                   *string  `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+	StartInclusive             *bool    `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+	EndKey                     *string  `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+	EndInclusive               *bool    `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+	StartPostfixValue          []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+	EndPostfixValue            []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+	EndUnappliedLogTimestampUs *int64   `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+	XXX_unrecognized           []byte   `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset()         { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage()    {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+	if m != nil && m.IndexName != nil {
+		return *m.IndexName
+	}
+	return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+	if m != nil && m.StartKey != nil {
+		return *m.StartKey
+	}
+	return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+	if m != nil && m.StartInclusive != nil {
+		return *m.StartInclusive
+	}
+	return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+	if m != nil && m.EndKey != nil {
+		return *m.EndKey
+	}
+	return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+	if m != nil && m.EndInclusive != nil {
+		return *m.EndInclusive
+	}
+	return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+	if m != nil {
+		return m.StartPostfixValue
+	}
+	return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+	if m != nil {
+		return m.EndPostfixValue
+	}
+	return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+	if m != nil && m.EndUnappliedLogTimestampUs != nil {
+		return *m.EndUnappliedLogTimestampUs
+	}
+	return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+	IndexName        *string  `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+	PrefixValue      []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+	ValuePrefix      *bool    `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset()         { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage()    {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+	if m != nil && m.IndexName != nil {
+		return *m.IndexName
+	}
+	return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+	if m != nil {
+		return m.PrefixValue
+	}
+	return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+	if m != nil && m.ValuePrefix != nil {
+		return *m.ValuePrefix
+	}
+	return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+	Distinct         *bool      `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+	Kind             *string    `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+	Ancestor         *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+	XXX_unrecognized []byte     `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset()         { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage()    {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+	if m != nil && m.Distinct != nil {
+		return *m.Distinct
+	}
+	return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+	if m != nil && m.Kind != nil {
+		return *m.Kind
+	}
+	return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+	if m != nil {
+		return m.Ancestor
+	}
+	return nil
+}
+
+type CompiledCursor struct {
+	Position         *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"`
+	XXX_unrecognized []byte                   `json:"-"`
+}
+
+func (m *CompiledCursor) Reset()         { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage()    {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+	if m != nil {
+		return m.Position
+	}
+	return nil
+}
+
+type CompiledCursor_Position struct {
+	StartKey         *string                               `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+	Indexvalue       []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+	Key              *Reference                            `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+	StartInclusive   *bool                                 `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+	XXX_unrecognized []byte                                `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset()         { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage()    {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+	if m != nil && m.StartKey != nil {
+		return *m.StartKey
+	}
+	return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+	if m != nil {
+		return m.Indexvalue
+	}
+	return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+	if m != nil && m.StartInclusive != nil {
+		return *m.StartInclusive
+	}
+	return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+	Property         *string        `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+	Value            *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+	XXX_unrecognized []byte         `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset()         { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage()    {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+	if m != nil && m.Property != nil {
+		return *m.Property
+	}
+	return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type Cursor struct {
+	Cursor           *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+	App              *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Cursor) Reset()         { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage()    {}
+
+func (m *Cursor) GetCursor() uint64 {
+	if m != nil && m.Cursor != nil {
+		return *m.Cursor
+	}
+	return 0
+}
+
+func (m *Cursor) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+type Error struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset()         { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage()    {}
+
+type Cost struct {
+	IndexWrites             *int32           `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+	IndexWriteBytes         *int32           `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+	EntityWrites            *int32           `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+	EntityWriteBytes        *int32           `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+	Commitcost              *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+	ApproximateStorageDelta *int32           `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+	IdSequenceUpdates       *int32           `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+	XXX_unrecognized        []byte           `json:"-"`
+}
+
+func (m *Cost) Reset()         { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage()    {}
+
+func (m *Cost) GetIndexWrites() int32 {
+	if m != nil && m.IndexWrites != nil {
+		return *m.IndexWrites
+	}
+	return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+	if m != nil && m.IndexWriteBytes != nil {
+		return *m.IndexWriteBytes
+	}
+	return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+	if m != nil && m.EntityWrites != nil {
+		return *m.EntityWrites
+	}
+	return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+	if m != nil && m.EntityWriteBytes != nil {
+		return *m.EntityWriteBytes
+	}
+	return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+	if m != nil {
+		return m.Commitcost
+	}
+	return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+	if m != nil && m.ApproximateStorageDelta != nil {
+		return *m.ApproximateStorageDelta
+	}
+	return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+	if m != nil && m.IdSequenceUpdates != nil {
+		return *m.IdSequenceUpdates
+	}
+	return 0
+}
+
+type Cost_CommitCost struct {
+	RequestedEntityPuts    *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+	RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+	XXX_unrecognized       []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset()         { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage()    {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+	if m != nil && m.RequestedEntityPuts != nil {
+		return *m.RequestedEntityPuts
+	}
+	return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+	if m != nil && m.RequestedEntityDeletes != nil {
+		return *m.RequestedEntityDeletes
+	}
+	return 0
+}
+
+type GetRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+	Key              []*Reference    `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	Transaction      *Transaction    `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+	FailoverMs       *int64          `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+	Strong           *bool           `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+	AllowDeferred    *bool           `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *GetRequest) Reset()         { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage()    {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+	if m != nil && m.FailoverMs != nil {
+		return *m.FailoverMs
+	}
+	return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+	if m != nil && m.Strong != nil {
+		return *m.Strong
+	}
+	return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+	if m != nil && m.AllowDeferred != nil {
+		return *m.AllowDeferred
+	}
+	return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+	Entity           []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"`
+	Deferred         []*Reference          `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+	InOrder          *bool                 `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+	XXX_unrecognized []byte                `json:"-"`
+}
+
+func (m *GetResponse) Reset()         { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage()    {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+	if m != nil {
+		return m.Entity
+	}
+	return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+	if m != nil {
+		return m.Deferred
+	}
+	return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+	if m != nil && m.InOrder != nil {
+		return *m.InOrder
+	}
+	return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+	Entity           *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+	Key              *Reference   `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+	Version          *int64       `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset()         { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage()    {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+	if m != nil {
+		return m.Entity
+	}
+	return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return 0
+}
+
+type PutRequest struct {
+	Header           *InternalHeader          `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+	Entity           []*EntityProto           `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+	Transaction      *Transaction             `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+	CompositeIndex   []*CompositeIndex        `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+	Trusted          *bool                    `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+	Force            *bool                    `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+	MarkChanges      *bool                    `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+	Snapshot         []*Snapshot              `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+	AutoIdPolicy     *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+	XXX_unrecognized []byte                   `json:"-"`
+}
+
+func (m *PutRequest) Reset()         { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage()    {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+	if m != nil {
+		return m.Entity
+	}
+	return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+	if m != nil {
+		return m.CompositeIndex
+	}
+	return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+	if m != nil && m.Trusted != nil {
+		return *m.Trusted
+	}
+	return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+	if m != nil && m.Force != nil {
+		return *m.Force
+	}
+	return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+	if m != nil && m.MarkChanges != nil {
+		return *m.MarkChanges
+	}
+	return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+	if m != nil && m.AutoIdPolicy != nil {
+		return *m.AutoIdPolicy
+	}
+	return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+	Key              []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	Cost             *Cost        `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+	Version          []int64      `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte       `json:"-"`
+}
+
+func (m *PutResponse) Reset()         { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage()    {}
+
+func (m *PutResponse) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type TouchRequest struct {
+	Header           *InternalHeader   `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+	Key              []*Reference      `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+	CompositeIndex   []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+	Force            *bool             `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+	Snapshot         []*Snapshot       `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *TouchRequest) Reset()         { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage()    {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+	if m != nil {
+		return m.CompositeIndex
+	}
+	return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+	if m != nil && m.Force != nil {
+		return *m.Force
+	}
+	return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+type TouchResponse struct {
+	Cost             *Cost  `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset()         { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage()    {}
+
+func (m *TouchResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+type DeleteRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+	Key              []*Reference    `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+	Transaction      *Transaction    `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+	Trusted          *bool           `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+	Force            *bool           `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+	MarkChanges      *bool           `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+	Snapshot         []*Snapshot     `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *DeleteRequest) Reset()         { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage()    {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+	if m != nil && m.Trusted != nil {
+		return *m.Trusted
+	}
+	return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+	if m != nil && m.Force != nil {
+		return *m.Force
+	}
+	return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+	if m != nil && m.MarkChanges != nil {
+		return *m.MarkChanges
+	}
+	return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+	if m != nil {
+		return m.Snapshot
+	}
+	return nil
+}
+
+type DeleteResponse struct {
+	Cost             *Cost   `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+	Version          []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *DeleteResponse) Reset()         { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage()    {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type NextRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+	Cursor           *Cursor         `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+	Count            *int32          `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+	Offset           *int32          `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+	Compile          *bool           `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *NextRequest) Reset()         { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage()    {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+	if m != nil {
+		return m.Cursor
+	}
+	return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+	if m != nil && m.Offset != nil {
+		return *m.Offset
+	}
+	return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+	if m != nil && m.Compile != nil {
+		return *m.Compile
+	}
+	return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+	Cursor           *Cursor           `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+	Result           []*EntityProto    `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+	SkippedResults   *int32            `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+	MoreResults      *bool             `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+	KeysOnly         *bool             `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+	IndexOnly        *bool             `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+	SmallOps         *bool             `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+	CompiledQuery    *CompiledQuery    `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+	CompiledCursor   *CompiledCursor   `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+	Index            []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+	Version          []int64           `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *QueryResult) Reset()         { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage()    {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+	if m != nil {
+		return m.Cursor
+	}
+	return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+	if m != nil {
+		return m.Result
+	}
+	return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+	if m != nil && m.SkippedResults != nil {
+		return *m.SkippedResults
+	}
+	return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+	if m != nil && m.MoreResults != nil {
+		return *m.MoreResults
+	}
+	return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+	if m != nil && m.KeysOnly != nil {
+		return *m.KeysOnly
+	}
+	return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+	if m != nil && m.IndexOnly != nil {
+		return *m.IndexOnly
+	}
+	return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+	if m != nil && m.SmallOps != nil {
+		return *m.SmallOps
+	}
+	return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+	if m != nil {
+		return m.CompiledQuery
+	}
+	return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+	if m != nil {
+		return m.CompiledCursor
+	}
+	return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+	if m != nil {
+		return m.Index
+	}
+	return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type AllocateIdsRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+	ModelKey         *Reference      `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+	Size             *int64          `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+	Max              *int64          `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+	Reserve          []*Reference    `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset()         { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage()    {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+	if m != nil {
+		return m.ModelKey
+	}
+	return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+	if m != nil && m.Size != nil {
+		return *m.Size
+	}
+	return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+	if m != nil && m.Max != nil {
+		return *m.Max
+	}
+	return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+	if m != nil {
+		return m.Reserve
+	}
+	return nil
+}
+
+type AllocateIdsResponse struct {
+	Start            *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+	End              *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+	Cost             *Cost  `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset()         { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage()    {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+type CompositeIndices struct {
+	Index            []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *CompositeIndices) Reset()         { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage()    {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+	if m != nil {
+		return m.Index
+	}
+	return nil
+}
+
+type AddActionsRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+	Transaction      *Transaction    `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+	Action           []*Action       `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset()         { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage()    {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+	if m != nil {
+		return m.Transaction
+	}
+	return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+	if m != nil {
+		return m.Action
+	}
+	return nil
+}
+
+type AddActionsResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset()         { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage()    {}
+
+type BeginTransactionRequest struct {
+	Header           *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+	App              *string         `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+	AllowMultipleEg  *bool           `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+	XXX_unrecognized []byte          `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset()         { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage()    {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+	if m != nil && m.App != nil {
+		return *m.App
+	}
+	return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+	if m != nil && m.AllowMultipleEg != nil {
+		return *m.AllowMultipleEg
+	}
+	return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+	Cost             *Cost                     `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+	Version          []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"`
+	XXX_unrecognized []byte                    `json:"-"`
+}
+
+func (m *CommitResponse) Reset()         { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage()    {}
+
+func (m *CommitResponse) GetCost() *Cost {
+	if m != nil {
+		return m.Cost
+	}
+	return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type CommitResponse_Version struct {
+	RootEntityKey    *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+	Version          *int64     `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte     `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset()         { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage()    {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+	if m != nil {
+		return m.RootEntityKey
+	}
+	return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 00000000..e76f126f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+  optional int64 int64Value = 1;
+  optional bool booleanValue = 2;
+  optional string stringValue = 3;
+  optional double doubleValue = 4;
+
+  optional group PointValue = 5 {
+    required double x = 6;
+    required double y = 7;
+  }
+
+  optional group UserValue = 8 {
+    required string email = 9;
+    required string auth_domain = 10;
+    optional string nickname = 11;
+    optional string federated_identity = 21;
+    optional string federated_provider = 22;
+  }
+
+  optional group ReferenceValue = 12 {
+    required string app = 13;
+    optional string name_space = 20;
+    repeated group PathElement = 14 {
+      required string type = 15;
+      optional int64 id = 16;
+      optional string name = 17;
+    }
+  }
+}
+
+message Property {
+  enum Meaning {
+    NO_MEANING = 0;
+    BLOB = 14;
+    TEXT = 15;
+    BYTESTRING = 16;
+
+    ATOM_CATEGORY = 1;
+    ATOM_LINK = 2;
+    ATOM_TITLE = 3;
+    ATOM_CONTENT = 4;
+    ATOM_SUMMARY = 5;
+    ATOM_AUTHOR = 6;
+
+    GD_WHEN = 7;
+    GD_EMAIL = 8;
+    GEORSS_POINT = 9;
+    GD_IM = 10;
+
+    GD_PHONENUMBER = 11;
+    GD_POSTALADDRESS = 12;
+
+    GD_RATING = 13;
+
+    BLOBKEY = 17;
+    ENTITY_PROTO = 19;
+
+    INDEX_VALUE = 18;
+  };
+
+  optional Meaning meaning = 1 [default = NO_MEANING];
+  optional string meaning_uri = 2;
+
+  required string name = 3;
+
+  required PropertyValue value = 5;
+
+  required bool multiple = 4;
+
+  optional bool searchable = 6 [default=false];
+
+  enum FtsTokenizationOption {
+    HTML = 1;
+    ATOM = 2;
+  }
+
+  optional FtsTokenizationOption fts_tokenization_option = 8;
+
+  optional string locale = 9 [default = "en"];
+}
+
+message Path {
+  repeated group Element = 1 {
+    required string type = 2;
+    optional int64 id = 3;
+    optional string name = 4;
+  }
+}
+
+message Reference {
+  required string app = 13;
+  optional string name_space = 20;
+  required Path path = 14;
+}
+
+message User {
+  required string email = 1;
+  required string auth_domain = 2;
+  optional string nickname = 3;
+  optional string federated_identity = 6;
+  optional string federated_provider = 7;
+}
+
+message EntityProto {
+  required Reference key = 13;
+  required Path entity_group = 16;
+  optional User owner = 17;
+
+  enum Kind {
+    GD_CONTACT = 1;
+    GD_EVENT = 2;
+    GD_MESSAGE = 3;
+  }
+  optional Kind kind = 4;
+  optional string kind_uri = 5;
+
+  repeated Property property = 14;
+  repeated Property raw_property = 15;
+
+  optional int32 rank = 18;
+}
+
+message CompositeProperty {
+  required int64 index_id = 1;
+  repeated string value = 2;
+}
+
+message Index {
+  required string entity_type = 1;
+  required bool ancestor = 5;
+  repeated group Property = 2 {
+    required string name = 3;
+    enum Direction {
+      ASCENDING = 1;
+      DESCENDING = 2;
+    }
+    optional Direction direction = 4 [default = ASCENDING];
+  }
+}
+
+message CompositeIndex {
+  required string app_id = 1;
+  required int64 id = 2;
+  required Index definition = 3;
+
+  enum State {
+    WRITE_ONLY = 1;
+    READ_WRITE = 2;
+    DELETED = 3;
+    ERROR = 4;
+  }
+  required State state = 4;
+
+  optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+  message IndexValue {
+    required string property_name = 1;
+    required PropertyValue value = 2;
+  }
+
+  repeated IndexValue index_value = 1;
+
+  optional Reference key = 2;
+
+  optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+  optional string key = 1;
+
+  optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+  enum Status {
+    INACTIVE = 0;
+    ACTIVE = 1;
+  }
+
+  required int64 ts = 1;
+}
+
+message InternalHeader {
+  optional string qos = 1;
+}
+
+message Transaction {
+  optional InternalHeader header = 4;
+  required fixed64 handle = 1;
+  required string app = 2;
+  optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+  optional InternalHeader header = 39;
+
+  required string app = 1;
+  optional string name_space = 29;
+
+  optional string kind = 3;
+  optional Reference ancestor = 17;
+
+  repeated group Filter = 4 {
+    enum Operator {
+      LESS_THAN = 1;
+      LESS_THAN_OR_EQUAL = 2;
+      GREATER_THAN = 3;
+      GREATER_THAN_OR_EQUAL = 4;
+      EQUAL = 5;
+      IN = 6;
+      EXISTS = 7;
+    }
+
+    required Operator op = 6;
+    repeated Property property = 14;
+  }
+
+  optional string search_query = 8;
+
+  repeated group Order = 9 {
+    enum Direction {
+      ASCENDING = 1;
+      DESCENDING = 2;
+    }
+
+    required string property = 10;
+    optional Direction direction = 11 [default = ASCENDING];
+  }
+
+  enum Hint {
+    ORDER_FIRST = 1;
+    ANCESTOR_FIRST = 2;
+    FILTER_FIRST = 3;
+  }
+  optional Hint hint = 18;
+
+  optional int32 count = 23;
+
+  optional int32 offset = 12 [default = 0];
+
+  optional int32 limit = 16;
+
+  optional CompiledCursor compiled_cursor = 30;
+  optional CompiledCursor end_compiled_cursor = 31;
+
+  repeated CompositeIndex composite_index = 19;
+
+  optional bool require_perfect_plan = 20 [default = false];
+
+  optional bool keys_only = 21 [default = false];
+
+  optional Transaction transaction = 22;
+
+  optional bool compile = 25 [default = false];
+
+  optional int64 failover_ms = 26;
+
+  optional bool strong = 32;
+
+  repeated string property_name = 33;
+
+  repeated string group_by_property_name = 34;
+
+  optional bool distinct = 24;
+
+  optional int64 min_safe_time_seconds = 35;
+
+  repeated string safe_replica_name = 36;
+
+  optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+  required group PrimaryScan = 1 {
+    optional string index_name = 2;
+
+    optional string start_key = 3;
+    optional bool start_inclusive = 4;
+    optional string end_key = 5;
+    optional bool end_inclusive = 6;
+
+    repeated string start_postfix_value = 22;
+    repeated string end_postfix_value = 23;
+
+    optional int64 end_unapplied_log_timestamp_us = 19;
+  }
+
+  repeated group MergeJoinScan = 7 {
+    required string index_name = 8;
+
+    repeated string prefix_value = 9;
+
+    optional bool value_prefix = 20 [default=false];
+  }
+
+  optional Index index_def = 21;
+
+  optional int32 offset = 10 [default = 0];
+
+  optional int32 limit = 11;
+
+  required bool keys_only = 12;
+
+  repeated string property_name = 24;
+
+  optional int32 distinct_infix_size = 25;
+
+  optional group EntityFilter = 13 {
+    optional bool distinct = 14 [default=false];
+
+    optional string kind = 17;
+    optional Reference ancestor = 18;
+  }
+}
+
+message CompiledCursor {
+  optional group Position = 2 {
+    optional string start_key = 27;
+
+    repeated group IndexValue = 29 {
+      optional string property = 30;
+      required PropertyValue value = 31;
+    }
+
+    optional Reference key = 32;
+
+    optional bool start_inclusive = 28 [default=true];
+  }
+}
+
+message Cursor {
+  required fixed64 cursor = 1;
+
+  optional string app = 2;
+}
+
+message Error {
+  enum ErrorCode {
+    BAD_REQUEST = 1;
+    CONCURRENT_TRANSACTION = 2;
+    INTERNAL_ERROR = 3;
+    NEED_INDEX = 4;
+    TIMEOUT = 5;
+    PERMISSION_DENIED = 6;
+    BIGTABLE_ERROR = 7;
+    COMMITTED_BUT_STILL_APPLYING = 8;
+    CAPABILITY_DISABLED = 9;
+    TRY_ALTERNATE_BACKEND = 10;
+    SAFE_TIME_TOO_OLD = 11;
+  }
+}
+
+message Cost {
+  optional int32 index_writes = 1;
+  optional int32 index_write_bytes = 2;
+  optional int32 entity_writes = 3;
+  optional int32 entity_write_bytes = 4;
+  optional group CommitCost = 5 {
+    optional int32 requested_entity_puts = 6;
+    optional int32 requested_entity_deletes = 7;
+  };
+  optional int32 approximate_storage_delta = 8;
+  optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+  optional InternalHeader header = 6;
+
+  repeated Reference key = 1;
+  optional Transaction transaction = 2;
+
+  optional int64 failover_ms = 3;
+
+  optional bool strong = 4;
+
+  optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+  repeated group Entity = 1 {
+    optional EntityProto entity = 2;
+    optional Reference key = 4;
+
+    optional int64 version = 3;
+  }
+
+  repeated Reference deferred = 5;
+
+  optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+  optional InternalHeader header = 11;
+
+  repeated EntityProto entity = 1;
+  optional Transaction transaction = 2;
+  repeated CompositeIndex composite_index = 3;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+
+  enum AutoIdPolicy {
+    CURRENT = 0;
+    SEQUENTIAL = 1;
+  }
+  optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+  repeated Reference key = 1;
+  optional Cost cost = 2;
+  repeated int64 version = 3;
+}
+
+message TouchRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 1;
+  repeated CompositeIndex composite_index = 2;
+  optional bool force = 3 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+  optional Cost cost = 1;
+}
+
+message DeleteRequest {
+  optional InternalHeader header = 10;
+
+  repeated Reference key = 6;
+  optional Transaction transaction = 5;
+
+  optional bool trusted = 4 [default = false];
+
+  optional bool force = 7 [default = false];
+
+  optional bool mark_changes = 8 [default = false];
+  repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+  optional Cost cost = 1;
+  repeated int64 version = 3;
+}
+
+message NextRequest {
+  optional InternalHeader header = 5;
+
+  required Cursor cursor = 1;
+  optional int32 count = 2;
+
+  optional int32 offset = 4 [default = 0];
+
+  optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+  optional Cursor cursor = 1;
+
+  repeated EntityProto result = 2;
+
+  optional int32 skipped_results = 7;
+
+  required bool more_results = 3;
+
+  optional bool keys_only = 4;
+
+  optional bool index_only = 9;
+
+  optional bool small_ops = 10;
+
+  optional CompiledQuery compiled_query = 5;
+
+  optional CompiledCursor compiled_cursor = 6;
+
+  repeated CompositeIndex index = 8;
+
+  repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+  optional InternalHeader header = 4;
+
+  optional Reference model_key = 1;
+
+  optional int64 size = 2;
+
+  optional int64 max = 3;
+
+  repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+  required int64 start = 1;
+  required int64 end = 2;
+  optional Cost cost = 3;
+}
+
+message CompositeIndices {
+  repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+  optional InternalHeader header = 3;
+
+  required Transaction transaction = 1;
+  repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+  optional InternalHeader header = 3;
+
+  required string app = 1;
+  optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+  optional Cost cost = 1;
+
+  repeated group Version = 3 {
+    required Reference root_entity_key = 4;
+    required int64 version = 5;
+  }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 00000000..d538701a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+	return appID(FullyQualifiedAppID(c))
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 00000000..b59603f1
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"appengine"
+
+	netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.DefaultVersionHostname(c)
+}
+
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string                 { return appengine.ServerSoftware() }
+func InstanceID() string                     { return appengine.InstanceID() }
+func IsDevAppServer() bool                   { return appengine.IsDevAppServer() }
+
+func RequestID(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.RequestID(c)
+}
+
+func ModuleName(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.ModuleName(c)
+}
+func VersionID(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return appengine.VersionID(c)
+}
+
+func fullyQualifiedAppID(ctx netcontext.Context) string {
+	c := fromContext(ctx)
+	if c == nil {
+		panic(errNotAppEngineContext)
+	}
+	return c.FullyQualifiedAppID()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 00000000..d5fa75be
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,101 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"net/http"
+	"os"
+
+	netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+	hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+	hRequestLogId           = "X-AppEngine-Request-Log-Id"
+	hDatacenter             = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+	c := fromContext(ctx)
+	if c == nil {
+		return nil
+	}
+	return c.Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+	return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+	// TODO(dsymonds): Remove fallback when we've verified this.
+	if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+		return s
+	}
+	return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+	if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+	if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+		return s1 + "." + s2
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+	if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+		return s
+	}
+	return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+	// gae_project has everything except the partition prefix.
+	appID := os.Getenv("GAE_LONG_APP_ID")
+	if appID == "" {
+		appID = string(mustGetMetadata("instance/attributes/gae_project"))
+	}
+	return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+	appID := partitionlessAppID()
+
+	part := os.Getenv("GAE_PARTITION")
+	if part == "" {
+		part = string(mustGetMetadata("instance/attributes/gae_partition"))
+	}
+
+	if part != "" {
+		appID = part + "~" + appID
+	}
+	return appID
+}
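+
+// Illustrative example of the composition above: with GAE_PARTITION="s" and a
+// partitionless app ID of "my-app", fullyQualifiedAppID returns "s~my-app";
+// with no partition it returns "my-app" unchanged.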
+
+func IsDevAppServer() bool {
+	return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 00000000..051ea398
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,110 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+	errorCodeMaps[service] = m
+}
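+
+// Illustrative registration sketch (the service name and enum map below are
+// placeholders, not part of this file); a service package would call this
+// from its init function:
+//
+//	func init() {
+//		internal.RegisterErrorCodeMap("someservice", pb.Error_ErrorCode_name)
+//	}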
+
+type timeoutCodeKey struct {
+	service string
+	code    int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+	timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+	Service string
+	Detail  string
+	Code    int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+	if e.Code == 0 {
+		if e.Detail == "" {
+			return "APIError <empty>"
+		}
+		return e.Detail
+	}
+	s := fmt.Sprintf("API error %d", e.Code)
+	if m, ok := errorCodeMaps[e.Service]; ok {
+		s += " (" + e.Service + ": " + m[e.Code] + ")"
+	} else {
+		// Shouldn't happen, but provide a bit more detail if it does.
+		s = e.Service + " " + s
+	}
+	if e.Detail != "" {
+		s += ": " + e.Detail
+	}
+	return s
+}
+
+func (e *APIError) IsTimeout() bool {
+	return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
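+
+// Illustrative usage sketch (someAppEngineCall is hypothetical): callers can
+// type-assert the returned error and use IsTimeout to decide whether to retry:
+//
+//	if err := someAppEngineCall(); err != nil {
++//		if ae, ok := err.(*APIError); ok && ae.IsTimeout() {
+//			// back off and retry
+//		}
+//	}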
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+	Detail string
+	Code   int32
+	// TODO: Remove this if we get a distinguishable error code.
+	Timeout bool
+}
+
+func (e *CallError) Error() string {
+	var msg string
+	switch remotepb.RpcError_ErrorCode(e.Code) {
+	case remotepb.RpcError_UNKNOWN:
+		return e.Detail
+	case remotepb.RpcError_OVER_QUOTA:
+		msg = "Over quota"
+	case remotepb.RpcError_CAPABILITY_DISABLED:
+		msg = "Capability disabled"
+	case remotepb.RpcError_CANCELLED:
+		msg = "Canceled"
+	default:
+		msg = fmt.Sprintf("Call error %d", e.Code)
+	}
+	s := msg + ": " + e.Detail
+	if e.Timeout {
+		s += " (timeout)"
+	}
+	return s
+}
+
+func (e *CallError) IsTimeout() bool {
+	return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
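+
+// Illustrative registration sketch (pb.SomeRequest and its NameSpace field are
+// hypothetical); the nil check keeps the mutator safe to call more than once
+// on the same message, as required above:
+//
+//	internal.NamespaceMods["someservice"] = func(m proto.Message, namespace string) {
+//		req, ok := m.(*pb.SomeRequest)
+//		if !ok || req.NameSpace != nil {
+//			return // not the expected type, or already namespaced
+//		}
+//		req.NameSpace = &namespace
+//	}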
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 00000000..20c595be
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,899 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+// DO NOT EDIT!
+
+/*
+Package log is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/log/log_service.proto
+
+It has these top-level messages:
+	LogServiceError
+	UserAppLogLine
+	UserAppLogGroup
+	FlushRequest
+	SetStatusRequest
+	LogOffset
+	LogLine
+	RequestLog
+	LogModuleVersion
+	LogReadRequest
+	LogReadResponse
+	LogUsageRecord
+	LogUsageRequest
+	LogUsageResponse
+*/
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type LogServiceError_ErrorCode int32
+
+const (
+	LogServiceError_OK              LogServiceError_ErrorCode = 0
+	LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+	LogServiceError_STORAGE_ERROR   LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "INVALID_REQUEST",
+	2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+	"OK":              0,
+	"INVALID_REQUEST": 1,
+	"STORAGE_ERROR":   2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+	p := new(LogServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+	return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = LogServiceError_ErrorCode(value)
+	return nil
+}
+
+type LogServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogServiceError) Reset()         { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage()    {}
+
+type UserAppLogLine struct {
+	TimestampUsec    *int64  `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
+	Level            *int64  `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+	Message          *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset()         { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage()    {}
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+	if m != nil && m.TimestampUsec != nil {
+		return *m.TimestampUsec
+	}
+	return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+	if m != nil && m.Level != nil {
+		return *m.Level
+	}
+	return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+	if m != nil && m.Message != nil {
+		return *m.Message
+	}
+	return ""
+}
+
+type UserAppLogGroup struct {
+	LogLine          []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset()         { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage()    {}
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+	if m != nil {
+		return m.LogLine
+	}
+	return nil
+}
+
+type FlushRequest struct {
+	Logs             []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FlushRequest) Reset()         { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage()    {}
+
+func (m *FlushRequest) GetLogs() []byte {
+	if m != nil {
+		return m.Logs
+	}
+	return nil
+}
+
+type SetStatusRequest struct {
+	Status           *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset()         { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage()    {}
+
+func (m *SetStatusRequest) GetStatus() string {
+	if m != nil && m.Status != nil {
+		return *m.Status
+	}
+	return ""
+}
+
+type LogOffset struct {
+	RequestId        []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogOffset) Reset()         { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage()    {}
+
+func (m *LogOffset) GetRequestId() []byte {
+	if m != nil {
+		return m.RequestId
+	}
+	return nil
+}
+
+type LogLine struct {
+	Time             *int64  `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+	Level            *int32  `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+	LogMessage       *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *LogLine) Reset()         { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage()    {}
+
+func (m *LogLine) GetTime() int64 {
+	if m != nil && m.Time != nil {
+		return *m.Time
+	}
+	return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+	if m != nil && m.Level != nil {
+		return *m.Level
+	}
+	return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+	if m != nil && m.LogMessage != nil {
+		return *m.LogMessage
+	}
+	return ""
+}
+
+type RequestLog struct {
+	AppId                   *string    `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	ModuleId                *string    `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
+	VersionId               *string    `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
+	RequestId               []byte     `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
+	Offset                  *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+	Ip                      *string    `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+	Nickname                *string    `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+	StartTime               *int64     `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
+	EndTime                 *int64     `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
+	Latency                 *int64     `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+	Mcycles                 *int64     `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+	Method                  *string    `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+	Resource                *string    `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+	HttpVersion             *string    `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
+	Status                  *int32     `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+	ResponseSize            *int64     `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
+	Referrer                *string    `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+	UserAgent               *string    `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
+	UrlMapEntry             *string    `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
+	Combined                *string    `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+	ApiMcycles              *int64     `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
+	Host                    *string    `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+	Cost                    *float64   `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+	TaskQueueName           *string    `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
+	TaskName                *string    `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
+	WasLoadingRequest       *bool      `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
+	PendingTime             *int64     `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
+	ReplicaIndex            *int32     `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
+	Finished                *bool      `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+	CloneKey                []byte     `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
+	Line                    []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+	LinesIncomplete         *bool      `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
+	AppEngineRelease        []byte     `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
+	ExitReason              *int32     `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
+	WasThrottledForTime     *bool      `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
+	WasThrottledForRequests *bool      `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
+	ThrottledTime           *int64     `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
+	ServerName              []byte     `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
+	XXX_unrecognized        []byte     `json:"-"`
+}
+
+func (m *RequestLog) Reset()         { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage()    {}
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+	if m != nil && m.ModuleId != nil {
+		return *m.ModuleId
+	}
+	return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+	if m != nil && m.VersionId != nil {
+		return *m.VersionId
+	}
+	return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+	if m != nil {
+		return m.RequestId
+	}
+	return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+	if m != nil {
+		return m.Offset
+	}
+	return nil
+}
+
+func (m *RequestLog) GetIp() string {
+	if m != nil && m.Ip != nil {
+		return *m.Ip
+	}
+	return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+	if m != nil && m.Nickname != nil {
+		return *m.Nickname
+	}
+	return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+	if m != nil && m.Latency != nil {
+		return *m.Latency
+	}
+	return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+	if m != nil && m.Mcycles != nil {
+		return *m.Mcycles
+	}
+	return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+	if m != nil && m.Method != nil {
+		return *m.Method
+	}
+	return ""
+}
+
+func (m *RequestLog) GetResource() string {
+	if m != nil && m.Resource != nil {
+		return *m.Resource
+	}
+	return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+	if m != nil && m.HttpVersion != nil {
+		return *m.HttpVersion
+	}
+	return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+	if m != nil && m.Status != nil {
+		return *m.Status
+	}
+	return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+	if m != nil && m.ResponseSize != nil {
+		return *m.ResponseSize
+	}
+	return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+	if m != nil && m.Referrer != nil {
+		return *m.Referrer
+	}
+	return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+	if m != nil && m.UserAgent != nil {
+		return *m.UserAgent
+	}
+	return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+	if m != nil && m.UrlMapEntry != nil {
+		return *m.UrlMapEntry
+	}
+	return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+	if m != nil && m.Combined != nil {
+		return *m.Combined
+	}
+	return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+	if m != nil && m.ApiMcycles != nil {
+		return *m.ApiMcycles
+	}
+	return 0
+}
+
+func (m *RequestLog) GetHost() string {
+	if m != nil && m.Host != nil {
+		return *m.Host
+	}
+	return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+	if m != nil && m.Cost != nil {
+		return *m.Cost
+	}
+	return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+	if m != nil && m.TaskQueueName != nil {
+		return *m.TaskQueueName
+	}
+	return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+	if m != nil && m.TaskName != nil {
+		return *m.TaskName
+	}
+	return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+	if m != nil && m.WasLoadingRequest != nil {
+		return *m.WasLoadingRequest
+	}
+	return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+	if m != nil && m.PendingTime != nil {
+		return *m.PendingTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+	if m != nil && m.ReplicaIndex != nil {
+		return *m.ReplicaIndex
+	}
+	return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+	if m != nil && m.Finished != nil {
+		return *m.Finished
+	}
+	return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+	if m != nil {
+		return m.CloneKey
+	}
+	return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+	if m != nil {
+		return m.Line
+	}
+	return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+	if m != nil && m.LinesIncomplete != nil {
+		return *m.LinesIncomplete
+	}
+	return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+	if m != nil {
+		return m.AppEngineRelease
+	}
+	return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+	if m != nil && m.ExitReason != nil {
+		return *m.ExitReason
+	}
+	return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+	if m != nil && m.WasThrottledForTime != nil {
+		return *m.WasThrottledForTime
+	}
+	return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+	if m != nil && m.WasThrottledForRequests != nil {
+		return *m.WasThrottledForRequests
+	}
+	return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+	if m != nil && m.ThrottledTime != nil {
+		return *m.ThrottledTime
+	}
+	return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+	if m != nil {
+		return m.ServerName
+	}
+	return nil
+}
+
+type LogModuleVersion struct {
+	ModuleId         *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
+	VersionId        *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset()         { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage()    {}
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+	if m != nil && m.ModuleId != nil {
+		return *m.ModuleId
+	}
+	return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+	if m != nil && m.VersionId != nil {
+		return *m.VersionId
+	}
+	return ""
+}
+
+type LogReadRequest struct {
+	AppId             *string             `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	VersionId         []string            `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+	ModuleVersion     []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
+	StartTime         *int64              `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+	EndTime           *int64              `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+	Offset            *LogOffset          `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+	RequestId         [][]byte            `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
+	MinimumLogLevel   *int32              `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
+	IncludeIncomplete *bool               `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
+	Count             *int64              `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+	CombinedLogRegex  *string             `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
+	HostRegex         *string             `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
+	ReplicaIndex      *int32              `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
+	IncludeAppLogs    *bool               `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
+	AppLogsPerRequest *int32              `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
+	IncludeHost       *bool               `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
+	IncludeAll        *bool               `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
+	CacheIterator     *bool               `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
+	NumShards         *int32              `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
+	XXX_unrecognized  []byte              `json:"-"`
+}
+
+func (m *LogReadRequest) Reset()         { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage()    {}
+
+func (m *LogReadRequest) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+	if m != nil {
+		return m.VersionId
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+	if m != nil {
+		return m.ModuleVersion
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+	if m != nil {
+		return m.Offset
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+	if m != nil {
+		return m.RequestId
+	}
+	return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+	if m != nil && m.MinimumLogLevel != nil {
+		return *m.MinimumLogLevel
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+	if m != nil && m.IncludeIncomplete != nil {
+		return *m.IncludeIncomplete
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+	if m != nil && m.CombinedLogRegex != nil {
+		return *m.CombinedLogRegex
+	}
+	return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+	if m != nil && m.HostRegex != nil {
+		return *m.HostRegex
+	}
+	return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+	if m != nil && m.ReplicaIndex != nil {
+		return *m.ReplicaIndex
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+	if m != nil && m.IncludeAppLogs != nil {
+		return *m.IncludeAppLogs
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+	if m != nil && m.AppLogsPerRequest != nil {
+		return *m.AppLogsPerRequest
+	}
+	return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+	if m != nil && m.IncludeHost != nil {
+		return *m.IncludeHost
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+	if m != nil && m.IncludeAll != nil {
+		return *m.IncludeAll
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+	if m != nil && m.CacheIterator != nil {
+		return *m.CacheIterator
+	}
+	return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+	if m != nil && m.NumShards != nil {
+		return *m.NumShards
+	}
+	return 0
+}
+
+type LogReadResponse struct {
+	Log              []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+	Offset           *LogOffset    `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+	LastEndTime      *int64        `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
+	XXX_unrecognized []byte        `json:"-"`
+}
+
+func (m *LogReadResponse) Reset()         { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage()    {}
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+	if m != nil {
+		return m.Log
+	}
+	return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+	if m != nil {
+		return m.Offset
+	}
+	return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+	if m != nil && m.LastEndTime != nil {
+		return *m.LastEndTime
+	}
+	return 0
+}
+
+type LogUsageRecord struct {
+	VersionId        *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
+	StartTime        *int32  `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
+	EndTime          *int32  `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
+	Count            *int64  `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+	TotalSize        *int64  `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
+	Records          *int32  `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset()         { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage()    {}
+
+func (m *LogUsageRecord) GetVersionId() string {
+	if m != nil && m.VersionId != nil {
+		return *m.VersionId
+	}
+	return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+	if m != nil && m.Count != nil {
+		return *m.Count
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+	if m != nil && m.TotalSize != nil {
+		return *m.TotalSize
+	}
+	return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+	if m != nil && m.Records != nil {
+		return *m.Records
+	}
+	return 0
+}
+
+type LogUsageRequest struct {
+	AppId            *string  `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+	VersionId        []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+	StartTime        *int32   `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+	EndTime          *int32   `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+	ResolutionHours  *uint32  `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
+	CombineVersions  *bool    `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
+	UsageVersion     *int32   `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
+	VersionsOnly     *bool    `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset()         { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage()    {}
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+	if m != nil && m.AppId != nil {
+		return *m.AppId
+	}
+	return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+	if m != nil {
+		return m.VersionId
+	}
+	return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+	if m != nil && m.StartTime != nil {
+		return *m.StartTime
+	}
+	return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+	if m != nil && m.EndTime != nil {
+		return *m.EndTime
+	}
+	return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+	if m != nil && m.ResolutionHours != nil {
+		return *m.ResolutionHours
+	}
+	return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+	if m != nil && m.CombineVersions != nil {
+		return *m.CombineVersions
+	}
+	return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+	if m != nil && m.UsageVersion != nil {
+		return *m.UsageVersion
+	}
+	return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+	if m != nil && m.VersionsOnly != nil {
+		return *m.VersionsOnly
+	}
+	return false
+}
+
+type LogUsageResponse struct {
+	Usage            []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+	Summary          *LogUsageRecord   `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset()         { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage()    {}
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+	if m != nil {
+		return m.Usage
+	}
+	return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+	if m != nil {
+		return m.Summary
+	}
+	return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 00000000..8981dc47
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+  enum ErrorCode {
+    OK  = 0;
+    INVALID_REQUEST = 1;
+    STORAGE_ERROR = 2;
+  }
+}
+
+message UserAppLogLine {
+  required int64 timestamp_usec = 1;
+  required int64 level = 2;
+  required string message = 3;
+}
+
+message UserAppLogGroup {
+  repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+  optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+  required string status = 1;
+}
+
+
+message LogOffset {
+  optional bytes request_id = 1;
+}
+
+message LogLine {
+  required int64 time = 1;
+  required int32 level = 2;
+  required string log_message = 3;
+}
+
+message RequestLog {
+  required string app_id = 1;
+  optional string module_id = 37 [default="default"];
+  required string version_id = 2;
+  required bytes request_id = 3;
+  optional LogOffset offset = 35;
+  required string ip = 4;
+  optional string nickname = 5;
+  required int64 start_time = 6;
+  required int64 end_time = 7;
+  required int64 latency = 8;
+  required int64 mcycles = 9;
+  required string method = 10;
+  required string resource = 11;
+  required string http_version = 12;
+  required int32 status = 13;
+  required int64 response_size = 14;
+  optional string referrer = 15;
+  optional string user_agent = 16;
+  required string url_map_entry = 17;
+  required string combined = 18;
+  optional int64 api_mcycles = 19;
+  optional string host = 20;
+  optional double cost = 21;
+
+  optional string task_queue_name = 22;
+  optional string task_name = 23;
+
+  optional bool was_loading_request = 24;
+  optional int64 pending_time = 25;
+  optional int32 replica_index = 26 [default = -1];
+  optional bool finished = 27 [default = true];
+  optional bytes clone_key = 28;
+
+  repeated LogLine line = 29;
+
+  optional bool lines_incomplete = 36;
+  optional bytes app_engine_release = 38;
+
+  optional int32 exit_reason = 30;
+  optional bool was_throttled_for_time = 31;
+  optional bool was_throttled_for_requests = 32;
+  optional int64 throttled_time = 33;
+
+  optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+  optional string module_id = 1 [default="default"];
+  optional string version_id = 2;
+}
+
+message LogReadRequest {
+  required string app_id = 1;
+  repeated string version_id = 2;
+  repeated LogModuleVersion module_version = 19;
+
+  optional int64 start_time = 3;
+  optional int64 end_time = 4;
+  optional LogOffset offset = 5;
+  repeated bytes request_id = 6;
+
+  optional int32 minimum_log_level = 7;
+  optional bool include_incomplete = 8;
+  optional int64 count = 9;
+
+  optional string combined_log_regex = 14;
+  optional string host_regex = 15;
+  optional int32 replica_index = 16;
+
+  optional bool include_app_logs = 10;
+  optional int32 app_logs_per_request = 17;
+  optional bool include_host = 11;
+  optional bool include_all = 12;
+  optional bool cache_iterator = 13;
+  optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+  repeated RequestLog log = 1;
+  optional LogOffset offset = 2;
+  optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+  optional string version_id = 1;
+  optional int32 start_time = 2;
+  optional int32 end_time = 3;
+  optional int64 count = 4;
+  optional int64 total_size = 5;
+  optional int32 records = 6;
+}
+
+message LogUsageRequest {
+  required string app_id = 1;
+  repeated string version_id = 2;
+  optional int32 start_time = 3;
+  optional int32 end_time = 4;
+  optional uint32 resolution_hours = 5 [default = 1];
+  optional bool combine_versions = 6;
+  optional int32 usage_version = 7;
+  optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+  repeated LogUsageRecord usage = 1;
+  optional LogUsageRecord summary = 2;
+}
diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go
new file mode 100644
index 00000000..49036163
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main.go
@@ -0,0 +1,15 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+	"appengine_internal"
+)
+
+func Main() {
+	appengine_internal.Main()
+}
diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go
new file mode 100644
index 00000000..822e784a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/main_vm.go
@@ -0,0 +1,48 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"os"
+)
+
+func Main() {
+	installHealthChecker(http.DefaultServeMux)
+
+	port := "8080"
+	if s := os.Getenv("PORT"); s != "" {
+		port = s
+	}
+
+	host := ""
+	if IsDevAppServer() {
+		host = "127.0.0.1"
+	}
+	if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+		log.Fatalf("http.ListenAndServe: %v", err)
+	}
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+	// If no health check handler has been installed by this point, add a trivial one.
+	const healthPath = "/_ah/health"
+	hreq := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Path: healthPath,
+		},
+	}
+	if _, pat := mux.Handler(hreq); pat != healthPath {
+		mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+			io.WriteString(w, "ok")
+		})
+	}
+}
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 00000000..9cc1f71d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+//	https://cloud.google.com/compute/docs/metadata
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+)
+
+const (
+	metadataHost = "metadata"
+	metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+	metadataRequestHeaders = http.Header{
+		"Metadata-Flavor": []string{"Google"},
+	}
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+	b, err := getMetadata(key)
+	if err != nil {
+		log.Fatalf("Metadata fetch failed: %v", err)
+	}
+	return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+	// TODO(dsymonds): May need to use url.Parse to support keys with query args.
+	req := &http.Request{
+		Method: "GET",
+		URL: &url.URL{
+			Scheme: "http",
+			Host:   metadataHost,
+			Path:   metadataPath + key,
+		},
+		Header: metadataRequestHeaders,
+		Host:   metadataHost,
+	}
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != 200 {
+		return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+	}
+	return ioutil.ReadAll(resp.Body)
+}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 00000000..a0145ed3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,375 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+	ModulesServiceError
+	GetModulesRequest
+	GetModulesResponse
+	GetVersionsRequest
+	GetVersionsResponse
+	GetDefaultVersionRequest
+	GetDefaultVersionResponse
+	GetNumInstancesRequest
+	GetNumInstancesResponse
+	SetNumInstancesRequest
+	SetNumInstancesResponse
+	StartModuleRequest
+	StartModuleResponse
+	StopModuleRequest
+	StopModuleResponse
+	GetHostnameRequest
+	GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+	ModulesServiceError_OK                ModulesServiceError_ErrorCode = 0
+	ModulesServiceError_INVALID_MODULE    ModulesServiceError_ErrorCode = 1
+	ModulesServiceError_INVALID_VERSION   ModulesServiceError_ErrorCode = 2
+	ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+	ModulesServiceError_TRANSIENT_ERROR   ModulesServiceError_ErrorCode = 4
+	ModulesServiceError_UNEXPECTED_STATE  ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+	0: "OK",
+	1: "INVALID_MODULE",
+	2: "INVALID_VERSION",
+	3: "INVALID_INSTANCES",
+	4: "TRANSIENT_ERROR",
+	5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+	"OK":                0,
+	"INVALID_MODULE":    1,
+	"INVALID_VERSION":   2,
+	"INVALID_INSTANCES": 3,
+	"TRANSIENT_ERROR":   4,
+	"UNEXPECTED_STATE":  5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+	p := new(ModulesServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+	return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = ModulesServiceError_ErrorCode(value)
+	return nil
+}
+
+type ModulesServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset()         { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage()    {}
+
+type GetModulesRequest struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset()         { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage()    {}
+
+type GetModulesResponse struct {
+	Module           []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset()         { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage()    {}
+
+func (m *GetModulesResponse) GetModule() []string {
+	if m != nil {
+		return m.Module
+	}
+	return nil
+}
+
+type GetVersionsRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset()         { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage()    {}
+
+func (m *GetVersionsRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+type GetVersionsResponse struct {
+	Version          []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte   `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset()         { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage()    {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+	if m != nil {
+		return m.Version
+	}
+	return nil
+}
+
+type GetDefaultVersionRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset()         { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage()    {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+type GetDefaultVersionResponse struct {
+	Version          *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset()         { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage()    {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type GetNumInstancesRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset()         { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage()    {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type GetNumInstancesResponse struct {
+	Instances        *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset()         { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage()    {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+	if m != nil && m.Instances != nil {
+		return *m.Instances
+	}
+	return 0
+}
+
+type SetNumInstancesRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	Instances        *int64  `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset()         { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage()    {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+	if m != nil && m.Instances != nil {
+		return *m.Instances
+	}
+	return 0
+}
+
+type SetNumInstancesResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset()         { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage()    {}
+
+type StartModuleRequest struct {
+	Module           *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset()         { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage()    {}
+
+func (m *StartModuleRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type StartModuleResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset()         { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage()    {}
+
+type StopModuleRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset()         { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage()    {}
+
+func (m *StopModuleRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+type StopModuleResponse struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset()         { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage()    {}
+
+type GetHostnameRequest struct {
+	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	Instance         *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset()         { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage()    {}
+
+func (m *GetHostnameRequest) GetModule() string {
+	if m != nil && m.Module != nil {
+		return *m.Module
+	}
+	return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+	if m != nil && m.Version != nil {
+		return *m.Version
+	}
+	return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+	if m != nil && m.Instance != nil {
+		return *m.Instance
+	}
+	return ""
+}
+
+type GetHostnameResponse struct {
+	Hostname         *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset()         { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage()    {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+	if m != nil && m.Hostname != nil {
+		return *m.Hostname
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 00000000..d29f0065
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+  enum ErrorCode {
+    OK  = 0;
+    INVALID_MODULE = 1;
+    INVALID_VERSION = 2;
+    INVALID_INSTANCES = 3;
+    TRANSIENT_ERROR = 4;
+    UNEXPECTED_STATE = 5;
+  }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+  repeated string module = 1;
+}
+
+message GetVersionsRequest {
+  optional string module = 1;
+}
+
+message GetVersionsResponse {
+  repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+  optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+  required string version = 1;
+}
+
+message GetNumInstancesRequest {
+  optional string module = 1;
+  optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+  required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+  optional string module = 1;
+  optional string version = 2;
+  required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+  required string module = 1;
+  required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+  optional string module = 1;
+  optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+  optional string module = 1;
+  optional string version = 2;
+  optional string instance = 3;
+}
+
+message GetHostnameResponse {
+  required string hostname = 1;
+}
+
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 00000000..3b94cf0c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+	"log"
+	"net"
+	"runtime"
+	"sync"
+	"time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+	// non-blocking
+	select {
+	case <-limitSem:
+	default:
+		// This should not normally happen.
+		log.Print("appengine: unbalanced limitSem release!")
+	}
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+	limitSem <- 1
+
+	// Dial with a timeout in case the API host is MIA.
+	// The connection should normally be very fast.
+	conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+	if err != nil {
+		limitRelease()
+		return nil, err
+	}
+	lc := &limitConn{Conn: conn}
+	runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+	return lc, nil
+}
+
+type limitConn struct {
+	close sync.Once
+	net.Conn
+}
+
+func (lc *limitConn) Close() error {
+	defer lc.close.Do(func() {
+		limitRelease()
+		runtime.SetFinalizer(lc, nil)
+	})
+	return lc.Conn.Close()
+}
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 00000000..2fdb546a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+	echo 1>&2 $*
+	exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+	q=$(which $tool) || die "didn't find $tool"
+	echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+	echo 1>&2 "* $dir"
+	protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+  # Remove proto.RegisterEnum calls.
+  # These cause duplicate registration panics when these packages
+  # are used on classic App Engine. proto.RegisterEnum only affects
+  # parsing the text format; we don't care about that.
+  # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+  sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 00000000..526bd39e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,231 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+	Request
+	ApplicationError
+	RpcError
+	Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+	RpcError_UNKNOWN             RpcError_ErrorCode = 0
+	RpcError_CALL_NOT_FOUND      RpcError_ErrorCode = 1
+	RpcError_PARSE_ERROR         RpcError_ErrorCode = 2
+	RpcError_SECURITY_VIOLATION  RpcError_ErrorCode = 3
+	RpcError_OVER_QUOTA          RpcError_ErrorCode = 4
+	RpcError_REQUEST_TOO_LARGE   RpcError_ErrorCode = 5
+	RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+	RpcError_FEATURE_DISABLED    RpcError_ErrorCode = 7
+	RpcError_BAD_REQUEST         RpcError_ErrorCode = 8
+	RpcError_RESPONSE_TOO_LARGE  RpcError_ErrorCode = 9
+	RpcError_CANCELLED           RpcError_ErrorCode = 10
+	RpcError_REPLAY_ERROR        RpcError_ErrorCode = 11
+	RpcError_DEADLINE_EXCEEDED   RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+	0:  "UNKNOWN",
+	1:  "CALL_NOT_FOUND",
+	2:  "PARSE_ERROR",
+	3:  "SECURITY_VIOLATION",
+	4:  "OVER_QUOTA",
+	5:  "REQUEST_TOO_LARGE",
+	6:  "CAPABILITY_DISABLED",
+	7:  "FEATURE_DISABLED",
+	8:  "BAD_REQUEST",
+	9:  "RESPONSE_TOO_LARGE",
+	10: "CANCELLED",
+	11: "REPLAY_ERROR",
+	12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+	"UNKNOWN":             0,
+	"CALL_NOT_FOUND":      1,
+	"PARSE_ERROR":         2,
+	"SECURITY_VIOLATION":  3,
+	"OVER_QUOTA":          4,
+	"REQUEST_TOO_LARGE":   5,
+	"CAPABILITY_DISABLED": 6,
+	"FEATURE_DISABLED":    7,
+	"BAD_REQUEST":         8,
+	"RESPONSE_TOO_LARGE":  9,
+	"CANCELLED":           10,
+	"REPLAY_ERROR":        11,
+	"DEADLINE_EXCEEDED":   12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+	p := new(RpcError_ErrorCode)
+	*p = x
+	return p
+}
+func (x RpcError_ErrorCode) String() string {
+	return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = RpcError_ErrorCode(value)
+	return nil
+}
+
+type Request struct {
+	ServiceName      *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+	Method           *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+	Request          []byte  `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+	RequestId        *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *Request) Reset()         { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage()    {}
+
+func (m *Request) GetServiceName() string {
+	if m != nil && m.ServiceName != nil {
+		return *m.ServiceName
+	}
+	return ""
+}
+
+func (m *Request) GetMethod() string {
+	if m != nil && m.Method != nil {
+		return *m.Method
+	}
+	return ""
+}
+
+func (m *Request) GetRequest() []byte {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *Request) GetRequestId() string {
+	if m != nil && m.RequestId != nil {
+		return *m.RequestId
+	}
+	return ""
+}
+
+type ApplicationError struct {
+	Code             *int32  `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+	Detail           *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *ApplicationError) Reset()         { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage()    {}
+
+func (m *ApplicationError) GetCode() int32 {
+	if m != nil && m.Code != nil {
+		return *m.Code
+	}
+	return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+	if m != nil && m.Detail != nil {
+		return *m.Detail
+	}
+	return ""
+}
+
+type RpcError struct {
+	Code             *int32  `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+	Detail           *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *RpcError) Reset()         { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage()    {}
+
+func (m *RpcError) GetCode() int32 {
+	if m != nil && m.Code != nil {
+		return *m.Code
+	}
+	return 0
+}
+
+func (m *RpcError) GetDetail() string {
+	if m != nil && m.Detail != nil {
+		return *m.Detail
+	}
+	return ""
+}
+
+type Response struct {
+	Response         []byte            `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+	Exception        []byte            `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+	ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+	JavaException    []byte            `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+	RpcError         *RpcError         `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+	XXX_unrecognized []byte            `json:"-"`
+}
+
+func (m *Response) Reset()         { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage()    {}
+
+func (m *Response) GetResponse() []byte {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+func (m *Response) GetException() []byte {
+	if m != nil {
+		return m.Exception
+	}
+	return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+	if m != nil {
+		return m.ApplicationError
+	}
+	return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+	if m != nil {
+		return m.JavaException
+	}
+	return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+	if m != nil {
+		return m.RpcError
+	}
+	return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 00000000..f21763a4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+  required string service_name = 2;
+  required string method = 3;
+  required bytes request = 4;
+  optional string request_id = 5;
+}
+
+message ApplicationError {
+  required int32 code = 1;
+  required string detail = 2;
+}
+
+message RpcError {
+  enum ErrorCode {
+    UNKNOWN = 0;
+    CALL_NOT_FOUND = 1;
+    PARSE_ERROR = 2;
+    SECURITY_VIOLATION = 3;
+    OVER_QUOTA = 4;
+    REQUEST_TOO_LARGE = 5;
+    CAPABILITY_DISABLED = 6;
+    FEATURE_DISABLED = 7;
+    BAD_REQUEST = 8;
+    RESPONSE_TOO_LARGE = 9;
+    CANCELLED = 10;
+    REPLAY_ERROR = 11;
+    DEADLINE_EXCEEDED = 12;
+  }
+  required int32 code = 1;
+  optional string detail = 2;
+}
+
+message Response {
+  optional bytes response = 1;
+  optional bytes exception = 2;
+  optional ApplicationError application_error = 3;
+  optional bytes java_exception = 4;
+  optional RpcError rpc_error = 5;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 00000000..28a6d181
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,107 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+	netcontext "golang.org/x/net/context"
+
+	basepb "google.golang.org/appengine/internal/base"
+	pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+	v := reflect.ValueOf(f)
+	transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+	v := reflect.ValueOf(pb)
+	if f, ok := transactionSetters[v.Type()]; ok {
+		f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+	}
+}
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+	t, _ := ctx.Value(&transactionKey).(*transaction)
+	return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+	return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+	transaction pb.Transaction
+	finished    bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
+	if transactionFromContext(c) != nil {
+		return errors.New("nested transactions are not supported")
+	}
+
+	// Begin the transaction.
+	t := &transaction{}
+	req := &pb.BeginTransactionRequest{
+		App: proto.String(FullyQualifiedAppID(c)),
+	}
+	if xg {
+		req.AllowMultipleEg = proto.Bool(true)
+	}
+	if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+		return err
+	}
+
+	// Call f, rolling back the transaction if f returns a non-nil error, or panics.
+	// The panic is not recovered.
+	defer func() {
+		if t.finished {
+			return
+		}
+		t.finished = true
+		// Ignore the error return value, since we are already returning a non-nil
+		// error (or we're panicking).
+		Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+	}()
+	if err := f(withTransaction(c, t)); err != nil {
+		return err
+	}
+	t.finished = true
+
+	// Commit the transaction.
+	res := &pb.CommitResponse{}
+	err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+	if ae, ok := err.(*APIError); ok {
+		/* TODO: restore this conditional
+		if appengine.IsDevAppServer() {
+		*/
+		// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+		// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+		if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+			return ErrConcurrentTransaction
+		}
+		if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+			return ErrConcurrentTransaction
+		}
+	}
+	return err
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 00000000..af463fbb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+	google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+	URLFetchServiceError
+	URLFetchRequest
+	URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+	URLFetchServiceError_OK                       URLFetchServiceError_ErrorCode = 0
+	URLFetchServiceError_INVALID_URL              URLFetchServiceError_ErrorCode = 1
+	URLFetchServiceError_FETCH_ERROR              URLFetchServiceError_ErrorCode = 2
+	URLFetchServiceError_UNSPECIFIED_ERROR        URLFetchServiceError_ErrorCode = 3
+	URLFetchServiceError_RESPONSE_TOO_LARGE       URLFetchServiceError_ErrorCode = 4
+	URLFetchServiceError_DEADLINE_EXCEEDED        URLFetchServiceError_ErrorCode = 5
+	URLFetchServiceError_SSL_CERTIFICATE_ERROR    URLFetchServiceError_ErrorCode = 6
+	URLFetchServiceError_DNS_ERROR                URLFetchServiceError_ErrorCode = 7
+	URLFetchServiceError_CLOSED                   URLFetchServiceError_ErrorCode = 8
+	URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+	URLFetchServiceError_TOO_MANY_REDIRECTS       URLFetchServiceError_ErrorCode = 10
+	URLFetchServiceError_MALFORMED_REPLY          URLFetchServiceError_ErrorCode = 11
+	URLFetchServiceError_CONNECTION_ERROR         URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+	0:  "OK",
+	1:  "INVALID_URL",
+	2:  "FETCH_ERROR",
+	3:  "UNSPECIFIED_ERROR",
+	4:  "RESPONSE_TOO_LARGE",
+	5:  "DEADLINE_EXCEEDED",
+	6:  "SSL_CERTIFICATE_ERROR",
+	7:  "DNS_ERROR",
+	8:  "CLOSED",
+	9:  "INTERNAL_TRANSIENT_ERROR",
+	10: "TOO_MANY_REDIRECTS",
+	11: "MALFORMED_REPLY",
+	12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+	"OK":                       0,
+	"INVALID_URL":              1,
+	"FETCH_ERROR":              2,
+	"UNSPECIFIED_ERROR":        3,
+	"RESPONSE_TOO_LARGE":       4,
+	"DEADLINE_EXCEEDED":        5,
+	"SSL_CERTIFICATE_ERROR":    6,
+	"DNS_ERROR":                7,
+	"CLOSED":                   8,
+	"INTERNAL_TRANSIENT_ERROR": 9,
+	"TOO_MANY_REDIRECTS":       10,
+	"MALFORMED_REPLY":          11,
+	"CONNECTION_ERROR":         12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+	p := new(URLFetchServiceError_ErrorCode)
+	*p = x
+	return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+	return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+	if err != nil {
+		return err
+	}
+	*x = URLFetchServiceError_ErrorCode(value)
+	return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+	URLFetchRequest_GET    URLFetchRequest_RequestMethod = 1
+	URLFetchRequest_POST   URLFetchRequest_RequestMethod = 2
+	URLFetchRequest_HEAD   URLFetchRequest_RequestMethod = 3
+	URLFetchRequest_PUT    URLFetchRequest_RequestMethod = 4
+	URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+	URLFetchRequest_PATCH  URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+	1: "GET",
+	2: "POST",
+	3: "HEAD",
+	4: "PUT",
+	5: "DELETE",
+	6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+	"GET":    1,
+	"POST":   2,
+	"HEAD":   3,
+	"PUT":    4,
+	"DELETE": 5,
+	"PATCH":  6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+	p := new(URLFetchRequest_RequestMethod)
+	*p = x
+	return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+	return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+	if err != nil {
+		return err
+	}
+	*x = URLFetchRequest_RequestMethod(value)
+	return nil
+}
+
+type URLFetchServiceError struct {
+	XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset()         { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage()    {}
+
+type URLFetchRequest struct {
+	Method                        *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+	Url                           *string                        `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+	Header                        []*URLFetchRequest_Header      `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+	Payload                       []byte                         `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+	FollowRedirects               *bool                          `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+	Deadline                      *float64                       `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+	MustValidateServerCertificate *bool                          `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+	XXX_unrecognized              []byte                         `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset()         { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage()    {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+	if m != nil && m.Method != nil {
+		return *m.Method
+	}
+	return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+	if m != nil && m.Url != nil {
+		return *m.Url
+	}
+	return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+	if m != nil && m.FollowRedirects != nil {
+		return *m.FollowRedirects
+	}
+	return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+	if m != nil && m.Deadline != nil {
+		return *m.Deadline
+	}
+	return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+	if m != nil && m.MustValidateServerCertificate != nil {
+		return *m.MustValidateServerCertificate
+	}
+	return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+	Key              *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+	Value            *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset()         { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage()    {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+type URLFetchResponse struct {
+	Content               []byte                     `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+	StatusCode            *int32                     `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+	Header                []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+	ContentWasTruncated   *bool                      `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+	ExternalBytesSent     *int64                     `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+	ExternalBytesReceived *int64                     `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+	FinalUrl              *string                    `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+	ApiCpuMilliseconds    *int64                     `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+	ApiBytesSent          *int64                     `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+	ApiBytesReceived      *int64                     `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+	XXX_unrecognized      []byte                     `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset()         { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage()    {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+	if m != nil {
+		return m.Content
+	}
+	return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+	if m != nil && m.StatusCode != nil {
+		return *m.StatusCode
+	}
+	return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+	if m != nil && m.ContentWasTruncated != nil {
+		return *m.ContentWasTruncated
+	}
+	return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+	if m != nil && m.ExternalBytesSent != nil {
+		return *m.ExternalBytesSent
+	}
+	return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+	if m != nil && m.ExternalBytesReceived != nil {
+		return *m.ExternalBytesReceived
+	}
+	return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+	if m != nil && m.FinalUrl != nil {
+		return *m.FinalUrl
+	}
+	return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+	if m != nil && m.ApiCpuMilliseconds != nil {
+		return *m.ApiCpuMilliseconds
+	}
+	return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+	if m != nil && m.ApiBytesSent != nil {
+		return *m.ApiBytesSent
+	}
+	return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+	if m != nil && m.ApiBytesReceived != nil {
+		return *m.ApiBytesReceived
+	}
+	return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+	Key              *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+	Value            *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset()         { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage()    {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+	if m != nil && m.Key != nil {
+		return *m.Key
+	}
+	return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+	if m != nil && m.Value != nil {
+		return *m.Value
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 00000000..f695edf6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+  enum ErrorCode {
+    OK = 0;
+    INVALID_URL = 1;
+    FETCH_ERROR = 2;
+    UNSPECIFIED_ERROR = 3;
+    RESPONSE_TOO_LARGE = 4;
+    DEADLINE_EXCEEDED = 5;
+    SSL_CERTIFICATE_ERROR = 6;
+    DNS_ERROR = 7;
+    CLOSED = 8;
+    INTERNAL_TRANSIENT_ERROR = 9;
+    TOO_MANY_REDIRECTS = 10;
+    MALFORMED_REPLY = 11;
+    CONNECTION_ERROR = 12;
+  }
+}
+
+message URLFetchRequest {
+  enum RequestMethod {
+    GET = 1;
+    POST = 2;
+    HEAD = 3;
+    PUT = 4;
+    DELETE = 5;
+    PATCH = 6;
+  }
+  required RequestMethod Method = 1;
+  required string Url = 2;
+  repeated group Header = 3 {
+    required string Key = 4;
+    required string Value = 5;
+  }
+  optional bytes Payload = 6 [ctype=CORD];
+
+  optional bool FollowRedirects = 7 [default=true];
+
+  optional double Deadline = 8;
+
+  optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+  optional bytes Content = 1;
+  required int32 StatusCode = 2;
+  repeated group Header = 3 {
+    required string Key = 4;
+    required string Value = 5;
+  }
+  optional bool ContentWasTruncated = 6 [default=false];
+  optional int64 ExternalBytesSent = 7;
+  optional int64 ExternalBytesReceived = 8;
+
+  optional string FinalUrl = 9;
+
+  optional int64 ApiCpuMilliseconds = 10 [default=0];
+  optional int64 ApiBytesSent = 11 [default=0];
+  optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 00000000..21860ca0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+	"fmt"
+	"regexp"
+
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+	if !validNamespace.MatchString(namespace) {
+		return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+	}
+	return internal.NamespacedContext(c, namespace), nil
+}
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 00000000..05642a99
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import "golang.org/x/net/context"
+
+// IsTimeoutError reports whether err is a timeout error.
+func IsTimeoutError(err error) bool {
+	if err == context.DeadlineExceeded {
+		return true
+	}
+	if t, ok := err.(interface {
+		IsTimeout() bool
+	}); ok {
+		return t.IsTimeout()
+	}
+	return false
+}
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 00000000..6ffe1e6d
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+
+	"google.golang.org/appengine/internal"
+	pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+	Context context.Context
+
+	// Controls whether the application checks the validity of SSL certificates
+	// over HTTPS connections. A value of false (the default) instructs the
+	// application to send a request to the server only if the certificate is
+	// valid and signed by a trusted certificate authority (CA), and also
+	// includes a hostname that matches the certificate. A value of true
+	// instructs the application to perform no certificate validation.
+	AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5 second default is used.
+func Client(ctx context.Context) *http.Client {
+	return &http.Client{
+		Transport: &Transport{
+			Context: ctx,
+		},
+	}
+}
+
+type bodyReader struct {
+	content   []byte
+	truncated bool
+	closed    bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+	if t := http.StatusText(code); t != "" {
+		return t
+	}
+	return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+	if br.closed {
+		if br.truncated {
+			return 0, ErrTruncatedBody
+		}
+		return 0, io.EOF
+	}
+	n = copy(p, br.content)
+	if n > 0 {
+		br.content = br.content[n:]
+		return
+	}
+	if br.truncated {
+		br.closed = true
+		return 0, ErrTruncatedBody
+	}
+	return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+	br.closed = true
+	br.content = nil
+	return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+	"POST":  true,
+	"PUT":   true,
+	"PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+	if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+		return u.String()
+	}
+	aux := *u
+	aux.Opaque = "//" + aux.Host + aux.Opaque
+	return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if there
+// was an unsupported request or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+	if !ok {
+		return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+	}
+
+	method := pb.URLFetchRequest_RequestMethod(methNum)
+
+	freq := &pb.URLFetchRequest{
+		Method:                        &method,
+		Url:                           proto.String(urlString(req.URL)),
+		FollowRedirects:               proto.Bool(false), // http.Client's responsibility
+		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+	}
+	if deadline, ok := t.Context.Deadline(); ok {
+		freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+	}
+
+	for k, vals := range req.Header {
+		for _, val := range vals {
+			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+				Key:   proto.String(k),
+				Value: proto.String(val),
+			})
+		}
+	}
+	if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+		// Avoid a []byte copy if req.Body has a Bytes method.
+		switch b := req.Body.(type) {
+		case interface {
+			Bytes() []byte
+		}:
+			freq.Payload = b.Bytes()
+		default:
+			freq.Payload, err = ioutil.ReadAll(req.Body)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	fres := &pb.URLFetchResponse{}
+	if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+		return nil, err
+	}
+
+	res = &http.Response{}
+	res.StatusCode = int(*fres.StatusCode)
+	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+	res.Header = make(http.Header)
+	res.Request = req
+
+	// Faked:
+	res.ProtoMajor = 1
+	res.ProtoMinor = 1
+	res.Proto = "HTTP/1.1"
+	res.Close = true
+
+	for _, h := range fres.Header {
+		hkey := http.CanonicalHeaderKey(*h.Key)
+		hval := *h.Value
+		if hkey == "Content-Length" {
+			// Will get filled in below for all but HEAD requests.
+			if req.Method == "HEAD" {
+				res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+			}
+			continue
+		}
+		res.Header.Add(hkey, hval)
+	}
+
+	if req.Method != "HEAD" {
+		res.ContentLength = int64(len(fres.Content))
+	}
+
+	truncated := fres.GetContentWasTruncated()
+	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+	return
+}
+
+func init() {
+	internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+	internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD
new file mode 100644
index 00000000..72349cc6
--- /dev/null
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/BUILD
@@ -0,0 +1,32 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["gcp_test.go"],
+    library = ":go_default_library",
+    tags = ["automanaged"],
+    deps = ["//vendor/golang.org/x/oauth2:go_default_library"],
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["gcp.go"],
+    tags = ["automanaged"],
+    deps = [
+        "//vendor/github.com/golang/glog:go_default_library",
+        "//vendor/golang.org/x/net/context:go_default_library",
+        "//vendor/golang.org/x/oauth2:go_default_library",
+        "//vendor/golang.org/x/oauth2/google:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
+        "//vendor/k8s.io/client-go/rest:go_default_library",
+        "//vendor/k8s.io/client-go/util/jsonpath:go_default_library",
+    ],
+)
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS
new file mode 100644
index 00000000..6cf8db8a
--- /dev/null
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/OWNERS
@@ -0,0 +1,6 @@
+approvers:
+- cjcullen
+- jlowdermilk
+reviewers:
+- cjcullen
+- jlowdermilk
diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go
new file mode 100644
index 00000000..59a27bea
--- /dev/null
+++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/gcp/gcp.go
@@ -0,0 +1,308 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gcp
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os/exec"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/golang/glog"
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/util/jsonpath"
+)
+
+func init() {
+	if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
+		glog.Fatalf("Failed to register gcp auth plugin: %v", err)
+	}
+}
+
+// Stubbable for testing
+var execCommand = exec.Command
+
+// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide
+// tokens for kubectl to authenticate itself to the apiserver. A sample json config
+// is provided below with all recognized options described.
+//
+// {
+//   'auth-provider': {
+//     # Required
+//     "name": "gcp",
+//
+//     'config': {
+//       # Caching options
+//
+//       # Raw string data representing cached access token.
+//       "access-token": "ya29.CjWdA4GiBPTt",
+//       # RFC3339Nano expiration timestamp for cached access token.
+//       "expiry": "2016-10-31 22:31:9.123",
+//
+//       # Command execution options
+//       # These options direct the plugin to execute a specified command and parse
+//       # token and expiry time from the output of the command.
+//
+//       # Command to execute for access token. Command output will be parsed as JSON.
+//       # If "cmd-args" is not present, this value will be split on whitespace, with
+//       # the first element interpreted as the command, remaining elements as args.
+//       "cmd-path": "/usr/bin/gcloud",
+//
+//       # Arguments to pass to command to execute for access token.
+//       "cmd-args": "config config-helper --output=json"
+//
+//       # JSONPath to the string field that represents the access token in
+//       # command output. If omitted, defaults to "{.access_token}".
+//       "token-key": "{.credential.access_token}",
+//
+//       # JSONPath to the string field that represents expiration timestamp
+//       # of the access token in the command output. If omitted, defaults to
+//       # "{.token_expiry}"
+//       "expiry-key": ""{.credential.token_expiry}",
+//
+//       # golang reference time in the format that the expiration timestamp uses.
+//       # If omitted, defaults to time.RFC3339Nano
+//       "time-fmt": "2006-01-02 15:04:05.999999999"
+//     }
+//   }
+// }
+//
+type gcpAuthProvider struct {
+	tokenSource oauth2.TokenSource
+	persister   restclient.AuthProviderConfigPersister
+}
+
+func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
+	var ts oauth2.TokenSource
+	var err error
+	if cmd, useCmd := gcpConfig["cmd-path"]; useCmd {
+		if len(cmd) == 0 {
+			return nil, fmt.Errorf("missing access token cmd")
+		}
+		var args []string
+		if cmdArgs, ok := gcpConfig["cmd-args"]; ok {
+			args = strings.Fields(cmdArgs)
+		} else {
+			fields := strings.Fields(cmd)
+			cmd = fields[0]
+			args = fields[1:]
+		}
+		ts = newCmdTokenSource(cmd, args, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"])
+	} else {
+		ts, err = google.DefaultTokenSource(context.Background(), "https://www.googleapis.com/auth/cloud-platform")
+	}
+	if err != nil {
+		return nil, err
+	}
+	cts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, ts, gcpConfig)
+	if err != nil {
+		return nil, err
+	}
+	return &gcpAuthProvider{cts, persister}, nil
+}
+
+func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
+	return &conditionalTransport{&oauth2.Transport{Source: g.tokenSource, Base: rt}, g.persister}
+}
+
+func (g *gcpAuthProvider) Login() error { return nil }
+
+type cachedTokenSource struct {
+	lk          sync.Mutex
+	source      oauth2.TokenSource
+	accessToken string
+	expiry      time.Time
+	persister   restclient.AuthProviderConfigPersister
+	cache       map[string]string
+}
+
+func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) {
+	var expiryTime time.Time
+	if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil {
+		expiryTime = parsedTime
+	}
+	if cache == nil {
+		cache = make(map[string]string)
+	}
+	return &cachedTokenSource{
+		source:      ts,
+		accessToken: accessToken,
+		expiry:      expiryTime,
+		persister:   persister,
+		cache:       cache,
+	}, nil
+}
+
+func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
+	tok := t.cachedToken()
+	if tok.Valid() && !tok.Expiry.IsZero() {
+		return tok, nil
+	}
+	tok, err := t.source.Token()
+	if err != nil {
+		return nil, err
+	}
+	cache := t.update(tok)
+	if t.persister != nil {
+		if err := t.persister.Persist(cache); err != nil {
+			glog.V(4).Infof("Failed to persist token: %v", err)
+		}
+	}
+	return tok, nil
+}
+
+func (t *cachedTokenSource) cachedToken() *oauth2.Token {
+	t.lk.Lock()
+	defer t.lk.Unlock()
+	return &oauth2.Token{
+		AccessToken: t.accessToken,
+		TokenType:   "Bearer",
+		Expiry:      t.expiry,
+	}
+}
+
+func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string {
+	t.lk.Lock()
+	defer t.lk.Unlock()
+	t.accessToken = tok.AccessToken
+	t.expiry = tok.Expiry
+	ret := map[string]string{}
+	for k, v := range t.cache {
+		ret[k] = v
+	}
+	ret["access-token"] = t.accessToken
+	ret["expiry"] = t.expiry.Format(time.RFC3339Nano)
+	return ret
+}
+
+type commandTokenSource struct {
+	cmd       string
+	args      []string
+	tokenKey  string
+	expiryKey string
+	timeFmt   string
+}
+
+func newCmdTokenSource(cmd string, args []string, tokenKey, expiryKey, timeFmt string) *commandTokenSource {
+	if len(timeFmt) == 0 {
+		timeFmt = time.RFC3339Nano
+	}
+	if len(tokenKey) == 0 {
+		tokenKey = "{.access_token}"
+	}
+	if len(expiryKey) == 0 {
+		expiryKey = "{.token_expiry}"
+	}
+	return &commandTokenSource{
+		cmd:       cmd,
+		args:      args,
+		tokenKey:  tokenKey,
+		expiryKey: expiryKey,
+		timeFmt:   timeFmt,
+	}
+}
+
+func (c *commandTokenSource) Token() (*oauth2.Token, error) {
+	fullCmd := strings.Join(append([]string{c.cmd}, c.args...), " ")
+	cmd := execCommand(c.cmd, c.args...)
+	output, err := cmd.Output()
+	if err != nil {
+		return nil, fmt.Errorf("error executing access token command %q: err=%v output=%s", fullCmd, err, output)
+	}
+	token, err := c.parseTokenCmdOutput(output)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err)
+	}
+	return token, nil
+}
+
+func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) {
+	output, err := yaml.ToJSON(output)
+	if err != nil {
+		return nil, err
+	}
+	var data interface{}
+	if err := json.Unmarshal(output, &data); err != nil {
+		return nil, err
+	}
+
+	accessToken, err := parseJSONPath(data, "token-key", c.tokenKey)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing token-key %q from %q: %v", c.tokenKey, string(output), err)
+	}
+	expiryStr, err := parseJSONPath(data, "expiry-key", c.expiryKey)
+	if err != nil {
+		return nil, fmt.Errorf("error parsing expiry-key %q from %q: %v", c.expiryKey, string(output), err)
+	}
+	var expiry time.Time
+	if t, err := time.Parse(c.timeFmt, expiryStr); err != nil {
+		glog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err)
+	} else {
+		expiry = t
+	}
+
+	return &oauth2.Token{
+		AccessToken: accessToken,
+		TokenType:   "Bearer",
+		Expiry:      expiry,
+	}, nil
+}
+
+func parseJSONPath(input interface{}, name, template string) (string, error) {
+	j := jsonpath.New(name)
+	buf := new(bytes.Buffer)
+	if err := j.Parse(template); err != nil {
+		return "", err
+	}
+	if err := j.Execute(buf, input); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+type conditionalTransport struct {
+	oauthTransport *oauth2.Transport
+	persister      restclient.AuthProviderConfigPersister
+}
+
+func (t *conditionalTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	if len(req.Header.Get("Authorization")) != 0 {
+		return t.oauthTransport.Base.RoundTrip(req)
+	}
+
+	res, err := t.oauthTransport.RoundTrip(req)
+
+	if err != nil {
+		return nil, err
+	}
+
+	if res.StatusCode == 401 {
+		glog.V(4).Infof("The credentials that were supplied are invalid for the target cluster")
+		emptyCache := make(map[string]string)
+		t.persister.Persist(emptyCache)
+	}
+
+	return res, nil
+}
diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD b/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD
new file mode 100644
index 00000000..87440385
--- /dev/null
+++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/BUILD
@@ -0,0 +1,17 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "exec.go",
+        "funcs.go",
+    ],
+    tags = ["automanaged"],
+)
diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go
new file mode 100644
index 00000000..739fd350
--- /dev/null
+++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/exec.go
@@ -0,0 +1,94 @@
+// This package is copied from the Go library text/template.
+// The original private functions indirect and printableValue
+// are exported as public functions.
+package template
+
+import (
+	"fmt"
+	"reflect"
+)
+
+var Indirect = indirect
+var PrintableValue = printableValue
+
+var (
+	errorType       = reflect.TypeOf((*error)(nil)).Elem()
+	fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
+)
+
+// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
+// We indirect through pointers and empty interfaces (only) because
+// non-empty interfaces have methods we might need.
+func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
+	for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
+		if v.IsNil() {
+			return v, true
+		}
+		if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
+			break
+		}
+	}
+	return v, false
+}
+
+// printableValue returns the, possibly indirected, interface value inside v that
+// is best for a call to formatted printer.
+func printableValue(v reflect.Value) (interface{}, bool) {
+	if v.Kind() == reflect.Ptr {
+		v, _ = indirect(v) // fmt.Fprint handles nil.
+	}
+	if !v.IsValid() {
+		return "<no value>", true
+	}
+
+	if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
+		if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
+			v = v.Addr()
+		} else {
+			switch v.Kind() {
+			case reflect.Chan, reflect.Func:
+				return nil, false
+			}
+		}
+	}
+	return v.Interface(), true
+}
+
+// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
+func canBeNil(typ reflect.Type) bool {
+	switch typ.Kind() {
+	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return true
+	}
+	return false
+}
+
+// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
+// and whether the value has a meaningful truth value.
+func isTrue(val reflect.Value) (truth, ok bool) {
+	if !val.IsValid() {
+		// Something like var x interface{}, never set. It's a form of nil.
+		return false, true
+	}
+	switch val.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		truth = val.Len() > 0
+	case reflect.Bool:
+		truth = val.Bool()
+	case reflect.Complex64, reflect.Complex128:
+		truth = val.Complex() != 0
+	case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
+		truth = !val.IsNil()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		truth = val.Int() != 0
+	case reflect.Float32, reflect.Float64:
+		truth = val.Float() != 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		truth = val.Uint() != 0
+	case reflect.Struct:
+		truth = true // Struct values are always true.
+	default:
+		return
+	}
+	return truth, true
+}
diff --git a/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go b/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go
new file mode 100644
index 00000000..27a008b0
--- /dev/null
+++ b/vendor/k8s.io/client-go/third_party/forked/golang/template/funcs.go
@@ -0,0 +1,599 @@
+// This package is copied from the Go library text/template.
+// The original private functions eq, ge, gt, le, lt, and ne
+// are exported as public functions.
+package template
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"reflect"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+var Equal = eq
+var GreaterEqual = ge
+var Greater = gt
+var LessEqual = le
+var Less = lt
+var NotEqual = ne
+
+// FuncMap is the type of the map defining the mapping from names to functions.
+// Each function must have either a single return value, or two return values of
+// which the second has type error. In that case, if the second (error)
+// return value evaluates to non-nil during execution, execution terminates and
+// Execute returns that error.
+type FuncMap map[string]interface{}
+
+var builtins = FuncMap{
+	"and":      and,
+	"call":     call,
+	"html":     HTMLEscaper,
+	"index":    index,
+	"js":       JSEscaper,
+	"len":      length,
+	"not":      not,
+	"or":       or,
+	"print":    fmt.Sprint,
+	"printf":   fmt.Sprintf,
+	"println":  fmt.Sprintln,
+	"urlquery": URLQueryEscaper,
+
+	// Comparisons
+	"eq": eq, // ==
+	"ge": ge, // >=
+	"gt": gt, // >
+	"le": le, // <=
+	"lt": lt, // <
+	"ne": ne, // !=
+}
+
+var builtinFuncs = createValueFuncs(builtins)
+
+// createValueFuncs turns a FuncMap into a map[string]reflect.Value
+func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
+	m := make(map[string]reflect.Value)
+	addValueFuncs(m, funcMap)
+	return m
+}
+
+// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
+func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
+	for name, fn := range in {
+		v := reflect.ValueOf(fn)
+		if v.Kind() != reflect.Func {
+			panic("value for " + name + " not a function")
+		}
+		if !goodFunc(v.Type()) {
+			panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
+		}
+		out[name] = v
+	}
+}
+
+// addFuncs adds to values the functions in funcs. It does no checking of the input -
+// call addValueFuncs first.
+func addFuncs(out, in FuncMap) {
+	for name, fn := range in {
+		out[name] = fn
+	}
+}
+
+// goodFunc checks that the function or method has the right result signature.
+func goodFunc(typ reflect.Type) bool {
+	// We allow functions with 1 result or 2 results where the second is an error.
+	switch {
+	case typ.NumOut() == 1:
+		return true
+	case typ.NumOut() == 2 && typ.Out(1) == errorType:
+		return true
+	}
+	return false
+}
+
+// findFunction looks for a function in the template, and global map.
+func findFunction(name string) (reflect.Value, bool) {
+	if fn := builtinFuncs[name]; fn.IsValid() {
+		return fn, true
+	}
+	return reflect.Value{}, false
+}
+
+// Indexing.
+
+// index returns the result of indexing its first argument by the following
+// arguments.  Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
+// indexed item must be a map, slice, or array.
+func index(item interface{}, indices ...interface{}) (interface{}, error) {
+	v := reflect.ValueOf(item)
+	for _, i := range indices {
+		index := reflect.ValueOf(i)
+		var isNil bool
+		if v, isNil = indirect(v); isNil {
+			return nil, fmt.Errorf("index of nil pointer")
+		}
+		switch v.Kind() {
+		case reflect.Array, reflect.Slice, reflect.String:
+			var x int64
+			switch index.Kind() {
+			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+				x = index.Int()
+			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+				x = int64(index.Uint())
+			default:
+				return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
+			}
+			if x < 0 || x >= int64(v.Len()) {
+				return nil, fmt.Errorf("index out of range: %d", x)
+			}
+			v = v.Index(int(x))
+		case reflect.Map:
+			if !index.IsValid() {
+				index = reflect.Zero(v.Type().Key())
+			}
+			if !index.Type().AssignableTo(v.Type().Key()) {
+				return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
+			}
+			if x := v.MapIndex(index); x.IsValid() {
+				v = x
+			} else {
+				v = reflect.Zero(v.Type().Elem())
+			}
+		default:
+			return nil, fmt.Errorf("can't index item of type %s", v.Type())
+		}
+	}
+	return v.Interface(), nil
+}
+
+// Length
+
+// length returns the length of the item, with an error if it has no defined length.
+func length(item interface{}) (int, error) {
+	v, isNil := indirect(reflect.ValueOf(item))
+	if isNil {
+		return 0, fmt.Errorf("len of nil pointer")
+	}
+	switch v.Kind() {
+	case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len(), nil
+	}
+	return 0, fmt.Errorf("len of type %s", v.Type())
+}
+
+// Function invocation
+
+// call returns the result of evaluating the first argument as a function.
+// The function must return 1 result, or 2 results, the second of which is an error.
+func call(fn interface{}, args ...interface{}) (interface{}, error) {
+	v := reflect.ValueOf(fn)
+	typ := v.Type()
+	if typ.Kind() != reflect.Func {
+		return nil, fmt.Errorf("non-function of type %s", typ)
+	}
+	if !goodFunc(typ) {
+		return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
+	}
+	numIn := typ.NumIn()
+	var dddType reflect.Type
+	if typ.IsVariadic() {
+		if len(args) < numIn-1 {
+			return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
+		}
+		dddType = typ.In(numIn - 1).Elem()
+	} else {
+		if len(args) != numIn {
+			return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
+		}
+	}
+	argv := make([]reflect.Value, len(args))
+	for i, arg := range args {
+		value := reflect.ValueOf(arg)
+		// Compute the expected type. Clumsy because of variadics.
+		var argType reflect.Type
+		if !typ.IsVariadic() || i < numIn-1 {
+			argType = typ.In(i)
+		} else {
+			argType = dddType
+		}
+		if !value.IsValid() && canBeNil(argType) {
+			value = reflect.Zero(argType)
+		}
+		if !value.Type().AssignableTo(argType) {
+			return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
+		}
+		argv[i] = value
+	}
+	result := v.Call(argv)
+	if len(result) == 2 && !result[1].IsNil() {
+		return result[0].Interface(), result[1].Interface().(error)
+	}
+	return result[0].Interface(), nil
+}
+
+// Boolean logic.
+
+func truth(a interface{}) bool {
+	t, _ := isTrue(reflect.ValueOf(a))
+	return t
+}
+
+// and computes the Boolean AND of its arguments, returning
+// the first false argument it encounters, or the last argument.
+func and(arg0 interface{}, args ...interface{}) interface{} {
+	if !truth(arg0) {
+		return arg0
+	}
+	for i := range args {
+		arg0 = args[i]
+		if !truth(arg0) {
+			break
+		}
+	}
+	return arg0
+}
+
+// or computes the Boolean OR of its arguments, returning
+// the first true argument it encounters, or the last argument.
+func or(arg0 interface{}, args ...interface{}) interface{} {
+	if truth(arg0) {
+		return arg0
+	}
+	for i := range args {
+		arg0 = args[i]
+		if truth(arg0) {
+			break
+		}
+	}
+	return arg0
+}
+
+// not returns the Boolean negation of its argument.
+func not(arg interface{}) (truth bool) {
+	truth, _ = isTrue(reflect.ValueOf(arg))
+	return !truth
+}
+
+// Comparison.
+
+// TODO: Perhaps allow comparison between signed and unsigned integers.
+
+var (
+	errBadComparisonType = errors.New("invalid type for comparison")
+	errBadComparison     = errors.New("incompatible types for comparison")
+	errNoComparison      = errors.New("missing argument for comparison")
+)
+
+type kind int
+
+const (
+	invalidKind kind = iota
+	boolKind
+	complexKind
+	intKind
+	floatKind
+	integerKind
+	stringKind
+	uintKind
+)
+
+func basicKind(v reflect.Value) (kind, error) {
+	switch v.Kind() {
+	case reflect.Bool:
+		return boolKind, nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intKind, nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintKind, nil
+	case reflect.Float32, reflect.Float64:
+		return floatKind, nil
+	case reflect.Complex64, reflect.Complex128:
+		return complexKind, nil
+	case reflect.String:
+		return stringKind, nil
+	}
+	return invalidKind, errBadComparisonType
+}
+
+// eq evaluates the comparison a == b || a == c || ...
+func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	if len(arg2) == 0 {
+		return false, errNoComparison
+	}
+	for _, arg := range arg2 {
+		v2 := reflect.ValueOf(arg)
+		k2, err := basicKind(v2)
+		if err != nil {
+			return false, err
+		}
+		truth := false
+		if k1 != k2 {
+			// Special case: Can compare integer values regardless of type's sign.
+			switch {
+			case k1 == intKind && k2 == uintKind:
+				truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
+			case k1 == uintKind && k2 == intKind:
+				truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
+			default:
+				return false, errBadComparison
+			}
+		} else {
+			switch k1 {
+			case boolKind:
+				truth = v1.Bool() == v2.Bool()
+			case complexKind:
+				truth = v1.Complex() == v2.Complex()
+			case floatKind:
+				truth = v1.Float() == v2.Float()
+			case intKind:
+				truth = v1.Int() == v2.Int()
+			case stringKind:
+				truth = v1.String() == v2.String()
+			case uintKind:
+				truth = v1.Uint() == v2.Uint()
+			default:
+				panic("invalid kind")
+			}
+		}
+		if truth {
+			return true, nil
+		}
+	}
+	return false, nil
+}
+
+// ne evaluates the comparison a != b.
+func ne(arg1, arg2 interface{}) (bool, error) {
+	// != is the inverse of ==.
+	equal, err := eq(arg1, arg2)
+	return !equal, err
+}
+
+// lt evaluates the comparison a < b.
+func lt(arg1, arg2 interface{}) (bool, error) {
+	v1 := reflect.ValueOf(arg1)
+	k1, err := basicKind(v1)
+	if err != nil {
+		return false, err
+	}
+	v2 := reflect.ValueOf(arg2)
+	k2, err := basicKind(v2)
+	if err != nil {
+		return false, err
+	}
+	truth := false
+	if k1 != k2 {
+		// Special case: Can compare integer values regardless of type's sign.
+		switch {
+		case k1 == intKind && k2 == uintKind:
+			truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
+		case k1 == uintKind && k2 == intKind:
+			truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
+		default:
+			return false, errBadComparison
+		}
+	} else {
+		switch k1 {
+		case boolKind, complexKind:
+			return false, errBadComparisonType
+		case floatKind:
+			truth = v1.Float() < v2.Float()
+		case intKind:
+			truth = v1.Int() < v2.Int()
+		case stringKind:
+			truth = v1.String() < v2.String()
+		case uintKind:
+			truth = v1.Uint() < v2.Uint()
+		default:
+			panic("invalid kind")
+		}
+	}
+	return truth, nil
+}
+
+// le evaluates the comparison a <= b.
+func le(arg1, arg2 interface{}) (bool, error) {
+	// <= is < or ==.
+	lessThan, err := lt(arg1, arg2)
+	if lessThan || err != nil {
+		return lessThan, err
+	}
+	return eq(arg1, arg2)
+}
+
+// gt evaluates the comparison a > b.
+func gt(arg1, arg2 interface{}) (bool, error) {
+	// > is the inverse of <=.
+	lessOrEqual, err := le(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessOrEqual, nil
+}
+
+// ge evaluates the comparison a >= b.
+func ge(arg1, arg2 interface{}) (bool, error) {
+	// >= is the inverse of <.
+	lessThan, err := lt(arg1, arg2)
+	if err != nil {
+		return false, err
+	}
+	return !lessThan, nil
+}
+
+// HTML escaping.
+
+var (
+	htmlQuot = []byte("&#34;") // shorter than "&quot;"
+	htmlApos = []byte("&#39;") // shorter than "&apos;" and apos was not in HTML until HTML5
+	htmlAmp  = []byte("&amp;")
+	htmlLt   = []byte("&lt;")
+	htmlGt   = []byte("&gt;")
+)
+
+// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
+func HTMLEscape(w io.Writer, b []byte) {
+	last := 0
+	for i, c := range b {
+		var html []byte
+		switch c {
+		case '"':
+			html = htmlQuot
+		case '\'':
+			html = htmlApos
+		case '&':
+			html = htmlAmp
+		case '<':
+			html = htmlLt
+		case '>':
+			html = htmlGt
+		default:
+			continue
+		}
+		w.Write(b[last:i])
+		w.Write(html)
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
+func HTMLEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexAny(s, `'"&<>`) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	HTMLEscape(&b, []byte(s))
+	return b.String()
+}
+
+// HTMLEscaper returns the escaped HTML equivalent of the textual
+// representation of its arguments.
+func HTMLEscaper(args ...interface{}) string {
+	return HTMLEscapeString(evalArgs(args))
+}
+
+// JavaScript escaping.
+
+var (
+	jsLowUni = []byte(`\u00`)
+	hex      = []byte("0123456789ABCDEF")
+
+	jsBackslash = []byte(`\\`)
+	jsApos      = []byte(`\'`)
+	jsQuot      = []byte(`\"`)
+	jsLt        = []byte(`\x3C`)
+	jsGt        = []byte(`\x3E`)
+)
+
+// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
+func JSEscape(w io.Writer, b []byte) {
+	last := 0
+	for i := 0; i < len(b); i++ {
+		c := b[i]
+
+		if !jsIsSpecial(rune(c)) {
+			// fast path: nothing to do
+			continue
+		}
+		w.Write(b[last:i])
+
+		if c < utf8.RuneSelf {
+			// Quotes, slashes and angle brackets get quoted.
+			// Control characters get written as \u00XX.
+			switch c {
+			case '\\':
+				w.Write(jsBackslash)
+			case '\'':
+				w.Write(jsApos)
+			case '"':
+				w.Write(jsQuot)
+			case '<':
+				w.Write(jsLt)
+			case '>':
+				w.Write(jsGt)
+			default:
+				w.Write(jsLowUni)
+				t, b := c>>4, c&0x0f
+				w.Write(hex[t : t+1])
+				w.Write(hex[b : b+1])
+			}
+		} else {
+			// Unicode rune.
+			r, size := utf8.DecodeRune(b[i:])
+			if unicode.IsPrint(r) {
+				w.Write(b[i : i+size])
+			} else {
+				fmt.Fprintf(w, "\\u%04X", r)
+			}
+			i += size - 1
+		}
+		last = i + 1
+	}
+	w.Write(b[last:])
+}
+
+// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
+func JSEscapeString(s string) string {
+	// Avoid allocation if we can.
+	if strings.IndexFunc(s, jsIsSpecial) < 0 {
+		return s
+	}
+	var b bytes.Buffer
+	JSEscape(&b, []byte(s))
+	return b.String()
+}
+
+func jsIsSpecial(r rune) bool {
+	switch r {
+	case '\\', '\'', '"', '<', '>':
+		return true
+	}
+	return r < ' ' || utf8.RuneSelf <= r
+}
+
+// JSEscaper returns the escaped JavaScript equivalent of the textual
+// representation of its arguments.
+func JSEscaper(args ...interface{}) string {
+	return JSEscapeString(evalArgs(args))
+}
+
+// URLQueryEscaper returns the escaped value of the textual representation of
+// its arguments in a form suitable for embedding in a URL query.
+func URLQueryEscaper(args ...interface{}) string {
+	return url.QueryEscape(evalArgs(args))
+}
+
+// evalArgs formats the list of arguments into a string. It is therefore equivalent to
+//	fmt.Sprint(args...)
+// except that each argument is indirected (if a pointer), as required,
+// using the same rules as the default string evaluation during template
+// execution.
+func evalArgs(args []interface{}) string {
+	ok := false
+	var s string
+	// Fast path for simple common case.
+	if len(args) == 1 {
+		s, ok = args[0].(string)
+	}
+	if !ok {
+		for i, arg := range args {
+			a, ok := printableValue(reflect.ValueOf(arg))
+			if ok {
+				args[i] = a
+			} // else let fmt do its thing
+		}
+		s = fmt.Sprint(args...)
+	}
+	return s
+}
diff --git a/vendor/k8s.io/client-go/util/jsonpath/BUILD b/vendor/k8s.io/client-go/util/jsonpath/BUILD
new file mode 100644
index 00000000..521b689f
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/BUILD
@@ -0,0 +1,31 @@
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+load(
+    "@io_bazel_rules_go//go:def.bzl",
+    "go_library",
+    "go_test",
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = [
+        "jsonpath_test.go",
+        "parser_test.go",
+    ],
+    library = ":go_default_library",
+    tags = ["automanaged"],
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "jsonpath.go",
+        "node.go",
+        "parser.go",
+    ],
+    tags = ["automanaged"],
+    deps = ["//vendor/k8s.io/client-go/third_party/forked/golang/template:go_default_library"],
+)
diff --git a/vendor/k8s.io/client-go/util/jsonpath/doc.go b/vendor/k8s.io/client-go/util/jsonpath/doc.go
new file mode 100644
index 00000000..0effb15c
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package jsonpath is a template engine using jsonpath syntax,
+// which can be seen at http://goessner.net/articles/JsonPath/.
+// In addition, it has a {range} ... {end} function to iterate over lists and slices.
+package jsonpath // import "k8s.io/client-go/util/jsonpath"
diff --git a/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
new file mode 100644
index 00000000..decbec0b
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/jsonpath.go
@@ -0,0 +1,509 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonpath
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+
+	"k8s.io/client-go/third_party/forked/golang/template"
+)
+
+type JSONPath struct {
+	name       string
+	parser     *Parser
+	stack      [][]reflect.Value // push and pop values in different scopes
+	cur        []reflect.Value   // current scope values
+	beginRange int
+	inRange    int
+	endRange   int
+
+	allowMissingKeys bool
+}
+
+func New(name string) *JSONPath {
+	return &JSONPath{
+		name:       name,
+		beginRange: 0,
+		inRange:    0,
+		endRange:   0,
+	}
+}
+
+// AllowMissingKeys allows a caller to specify whether they want an error if a field or map key
+// cannot be located, or simply an empty result. The receiver is returned for chaining.
+func (j *JSONPath) AllowMissingKeys(allow bool) *JSONPath {
+	j.allowMissingKeys = allow
+	return j
+}
+
+// Parse parses the given template and returns any parse error.
+func (j *JSONPath) Parse(text string) (err error) {
+	j.parser, err = Parse(j.name, text)
+	return
+}
+
+// Execute binds data into the template and writes the result to wr.
+func (j *JSONPath) Execute(wr io.Writer, data interface{}) error {
+	fullResults, err := j.FindResults(data)
+	if err != nil {
+		return err
+	}
+	for ix := range fullResults {
+		if err := j.PrintResults(wr, fullResults[ix]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
+	if j.parser == nil {
+		return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
+	}
+
+	j.cur = []reflect.Value{reflect.ValueOf(data)}
+	nodes := j.parser.Root.Nodes
+	fullResult := [][]reflect.Value{}
+	for i := 0; i < len(nodes); i++ {
+		node := nodes[i]
+		results, err := j.walk(j.cur, node)
+		if err != nil {
+			return nil, err
+		}
+
+		// encounter an end node, break the current block
+		if j.endRange > 0 && j.endRange <= j.inRange {
+			j.endRange -= 1
+			break
+		}
+		// encounter a range node, start a range loop
+		if j.beginRange > 0 {
+			j.beginRange -= 1
+			j.inRange += 1
+			for k, value := range results {
+				j.parser.Root.Nodes = nodes[i+1:]
+				if k == len(results)-1 {
+					j.inRange -= 1
+				}
+				nextResults, err := j.FindResults(value.Interface())
+				if err != nil {
+					return nil, err
+				}
+				fullResult = append(fullResult, nextResults...)
+			}
+			break
+		}
+		fullResult = append(fullResult, results)
+	}
+	return fullResult, nil
+}
+
+// PrintResults writes the results to the writer.
+func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
+	for i, r := range results {
+		text, err := j.evalToText(r)
+		if err != nil {
+			return err
+		}
+		if i != len(results)-1 {
+			text = append(text, ' ')
+		}
+		if _, err = wr.Write(text); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// walk visits the tree rooted at the given node in DFS order.
+func (j *JSONPath) walk(value []reflect.Value, node Node) ([]reflect.Value, error) {
+	switch node := node.(type) {
+	case *ListNode:
+		return j.evalList(value, node)
+	case *TextNode:
+		return []reflect.Value{reflect.ValueOf(node.Text)}, nil
+	case *FieldNode:
+		return j.evalField(value, node)
+	case *ArrayNode:
+		return j.evalArray(value, node)
+	case *FilterNode:
+		return j.evalFilter(value, node)
+	case *IntNode:
+		return j.evalInt(value, node)
+	case *BoolNode:
+		return j.evalBool(value, node)
+	case *FloatNode:
+		return j.evalFloat(value, node)
+	case *WildcardNode:
+		return j.evalWildcard(value, node)
+	case *RecursiveNode:
+		return j.evalRecursive(value, node)
+	case *UnionNode:
+		return j.evalUnion(value, node)
+	case *IdentifierNode:
+		return j.evalIdentifier(value, node)
+	default:
+		return value, fmt.Errorf("unexpected Node %v", node)
+	}
+}
+
+// evalInt evaluates IntNode
+func (j *JSONPath) evalInt(input []reflect.Value, node *IntNode) ([]reflect.Value, error) {
+	result := make([]reflect.Value, len(input))
+	for i := range input {
+		result[i] = reflect.ValueOf(node.Value)
+	}
+	return result, nil
+}
+
+// evalFloat evaluates FloatNode
+func (j *JSONPath) evalFloat(input []reflect.Value, node *FloatNode) ([]reflect.Value, error) {
+	result := make([]reflect.Value, len(input))
+	for i := range input {
+		result[i] = reflect.ValueOf(node.Value)
+	}
+	return result, nil
+}
+
+// evalBool evaluates BoolNode
+func (j *JSONPath) evalBool(input []reflect.Value, node *BoolNode) ([]reflect.Value, error) {
+	result := make([]reflect.Value, len(input))
+	for i := range input {
+		result[i] = reflect.ValueOf(node.Value)
+	}
+	return result, nil
+}
+
+// evalList evaluates ListNode
+func (j *JSONPath) evalList(value []reflect.Value, node *ListNode) ([]reflect.Value, error) {
+	var err error
+	curValue := value
+	for _, node := range node.Nodes {
+		curValue, err = j.walk(curValue, node)
+		if err != nil {
+			return curValue, err
+		}
+	}
+	return curValue, nil
+}
+
+// evalIdentifier evaluates IdentifierNode
+func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	switch node.Name {
+	case "range":
+		j.stack = append(j.stack, j.cur)
+		j.beginRange += 1
+		results = input
+	case "end":
+		if j.endRange < j.inRange { // inside a loop, break the current block
+			j.endRange += 1
+			break
+		}
+		// the loop is about to end, pop value and continue the following execution
+		if len(j.stack) > 0 {
+			j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
+		} else {
+			return results, fmt.Errorf("not in range, nothing to end")
+		}
+	default:
+		return input, fmt.Errorf("unrecognized identifier %v", node.Name)
+	}
+	return results, nil
+}
+
+// evalArray evaluates ArrayNode
+func (j *JSONPath) evalArray(input []reflect.Value, node *ArrayNode) ([]reflect.Value, error) {
+	result := []reflect.Value{}
+	for _, value := range input {
+
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+		if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
+			return input, fmt.Errorf("%v is not array or slice", value.Type())
+		}
+		params := node.Params
+		if !params[0].Known {
+			params[0].Value = 0
+		}
+		if params[0].Value < 0 {
+			params[0].Value += value.Len()
+		}
+		if !params[1].Known {
+			params[1].Value = value.Len()
+		}
+
+		if params[1].Value < 0 {
+			params[1].Value += value.Len()
+		}
+
+		sliceLength := value.Len()
+		if params[1].Value != params[0].Value { // if you're requesting zero elements, allow it through.
+			if params[0].Value >= sliceLength {
+				return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[0].Value, sliceLength)
+			}
+			if params[1].Value > sliceLength {
+				return input, fmt.Errorf("array index out of bounds: index %d, length %d", params[1].Value-1, sliceLength)
+			}
+		}
+
+		if !params[2].Known {
+			value = value.Slice(params[0].Value, params[1].Value)
+		} else {
+			value = value.Slice3(params[0].Value, params[1].Value, params[2].Value)
+		}
+		for i := 0; i < value.Len(); i++ {
+			result = append(result, value.Index(i))
+		}
+	}
+	return result, nil
+}
+
+// evalUnion evaluates UnionNode
+func (j *JSONPath) evalUnion(input []reflect.Value, node *UnionNode) ([]reflect.Value, error) {
+	result := []reflect.Value{}
+	for _, listNode := range node.Nodes {
+		temp, err := j.evalList(input, listNode)
+		if err != nil {
+			return input, err
+		}
+		result = append(result, temp...)
+	}
+	return result, nil
+}
+
+func (j *JSONPath) findFieldInValue(value *reflect.Value, node *FieldNode) (reflect.Value, error) {
+	t := value.Type()
+	var inlineValue *reflect.Value
+	for ix := 0; ix < t.NumField(); ix++ {
+		f := t.Field(ix)
+		jsonTag := f.Tag.Get("json")
+		parts := strings.Split(jsonTag, ",")
+		if len(parts) == 0 {
+			continue
+		}
+		if parts[0] == node.Value {
+			return value.Field(ix), nil
+		}
+		if len(parts[0]) == 0 {
+			val := value.Field(ix)
+			inlineValue = &val
+		}
+	}
+	if inlineValue != nil {
+		if inlineValue.Kind() == reflect.Struct {
+			// handle 'inline'
+			match, err := j.findFieldInValue(inlineValue, node)
+			if err != nil {
+				return reflect.Value{}, err
+			}
+			if match.IsValid() {
+				return match, nil
+			}
+		}
+	}
+	return value.FieldByName(node.Value), nil
+}
+
+// evalField evaluates a field of a struct or a key of a map.
+func (j *JSONPath) evalField(input []reflect.Value, node *FieldNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	// If there's no input, there's no output
+	if len(input) == 0 {
+		return results, nil
+	}
+	for _, value := range input {
+		var result reflect.Value
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+
+		if value.Kind() == reflect.Struct {
+			var err error
+			if result, err = j.findFieldInValue(&value, node); err != nil {
+				return nil, err
+			}
+		} else if value.Kind() == reflect.Map {
+			mapKeyType := value.Type().Key()
+			nodeValue := reflect.ValueOf(node.Value)
+			// node value type must be convertible to map key type
+			if !nodeValue.Type().ConvertibleTo(mapKeyType) {
+				return results, fmt.Errorf("%s is not convertible to %s", nodeValue, mapKeyType)
+			}
+			result = value.MapIndex(nodeValue.Convert(mapKeyType))
+		}
+		if result.IsValid() {
+			results = append(results, result)
+		}
+	}
+	if len(results) == 0 {
+		if j.allowMissingKeys {
+			return results, nil
+		}
+		return results, fmt.Errorf("%s is not found", node.Value)
+	}
+	return results, nil
+}
+
+// evalWildcard extracts all contents of the given value.
+func (j *JSONPath) evalWildcard(input []reflect.Value, node *WildcardNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	for _, value := range input {
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+
+		kind := value.Kind()
+		if kind == reflect.Struct {
+			for i := 0; i < value.NumField(); i++ {
+				results = append(results, value.Field(i))
+			}
+		} else if kind == reflect.Map {
+			for _, key := range value.MapKeys() {
+				results = append(results, value.MapIndex(key))
+			}
+		} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
+			for i := 0; i < value.Len(); i++ {
+				results = append(results, value.Index(i))
+			}
+		}
+	}
+	return results, nil
+}
+
+// evalRecursive visits the given value recursively and pushes all visited values to the result.
+func (j *JSONPath) evalRecursive(input []reflect.Value, node *RecursiveNode) ([]reflect.Value, error) {
+	result := []reflect.Value{}
+	for _, value := range input {
+		results := []reflect.Value{}
+		value, isNil := template.Indirect(value)
+		if isNil {
+			continue
+		}
+
+		kind := value.Kind()
+		if kind == reflect.Struct {
+			for i := 0; i < value.NumField(); i++ {
+				results = append(results, value.Field(i))
+			}
+		} else if kind == reflect.Map {
+			for _, key := range value.MapKeys() {
+				results = append(results, value.MapIndex(key))
+			}
+		} else if kind == reflect.Array || kind == reflect.Slice || kind == reflect.String {
+			for i := 0; i < value.Len(); i++ {
+				results = append(results, value.Index(i))
+			}
+		}
+		if len(results) != 0 {
+			result = append(result, value)
+			output, err := j.evalRecursive(results, node)
+			if err != nil {
+				return result, err
+			}
+			result = append(result, output...)
+		}
+	}
+	return result, nil
+}
+
+// evalFilter filters an array according to the FilterNode.
+func (j *JSONPath) evalFilter(input []reflect.Value, node *FilterNode) ([]reflect.Value, error) {
+	results := []reflect.Value{}
+	for _, value := range input {
+		value, _ = template.Indirect(value)
+
+		if value.Kind() != reflect.Array && value.Kind() != reflect.Slice {
+			return input, fmt.Errorf("%v is not array or slice and cannot be filtered", value)
+		}
+		for i := 0; i < value.Len(); i++ {
+			temp := []reflect.Value{value.Index(i)}
+			lefts, err := j.evalList(temp, node.Left)
+
+			// "exists" operator: keep the element if the left expression yields any result
+			if node.Operator == "exists" {
+				if len(lefts) > 0 {
+					results = append(results, value.Index(i))
+				}
+				continue
+			}
+
+			if err != nil {
+				return input, err
+			}
+
+			var left, right interface{}
+			if len(lefts) != 1 {
+				return input, fmt.Errorf("can only compare one element at a time")
+			}
+			left = lefts[0].Interface()
+
+			rights, err := j.evalList(temp, node.Right)
+			if err != nil {
+				return input, err
+			}
+			if len(rights) != 1 {
+				return input, fmt.Errorf("can only compare one element at a time")
+			}
+			right = rights[0].Interface()
+
+			pass := false
+			switch node.Operator {
+			case "<":
+				pass, err = template.Less(left, right)
+			case ">":
+				pass, err = template.Greater(left, right)
+			case "==":
+				pass, err = template.Equal(left, right)
+			case "!=":
+				pass, err = template.NotEqual(left, right)
+			case "<=":
+				pass, err = template.LessEqual(left, right)
+			case ">=":
+				pass, err = template.GreaterEqual(left, right)
+			default:
+				return results, fmt.Errorf("unrecognized filter operator %s", node.Operator)
+			}
+			if err != nil {
+				return results, err
+			}
+			if pass {
+				results = append(results, value.Index(i))
+			}
+		}
+	}
+	return results, nil
+}
+
+// evalToText translates a reflect value to its corresponding text.
+func (j *JSONPath) evalToText(v reflect.Value) ([]byte, error) {
+	iface, ok := template.PrintableValue(v)
+	if !ok {
+		return nil, fmt.Errorf("can't print type %s", v.Type())
+	}
+	var buffer bytes.Buffer
+	fmt.Fprint(&buffer, iface)
+	return buffer.Bytes(), nil
+}
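
For context, a minimal usage sketch of the evaluator above, assuming the standard k8s.io/client-go/util/jsonpath API (New, AllowMissingKeys, Parse, Execute); this is illustrative only and not part of the vendored files:

	package main

	import (
		"os"

		"k8s.io/client-go/util/jsonpath"
	)

	func main() {
		// Any Go value can be queried; a nested map stands in for an API object here.
		pod := map[string]interface{}{
			"metadata": map[string]interface{}{"name": "example-pod"},
		}

		// New creates an evaluator; AllowMissingKeys(true) makes missing fields
		// yield empty output instead of an error (see evalField above).
		jp := jsonpath.New("demo").AllowMissingKeys(true)
		if err := jp.Parse("{.metadata.name}"); err != nil {
			panic(err)
		}
		// Execute walks the parse tree against the data and writes the text result.
		if err := jp.Execute(os.Stdout, pod); err != nil {
			panic(err)
		}
	}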
diff --git a/vendor/k8s.io/client-go/util/jsonpath/node.go b/vendor/k8s.io/client-go/util/jsonpath/node.go
new file mode 100644
index 00000000..be74c4fe
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/node.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonpath
+
+import "fmt"
+
+// NodeType identifies the type of a parse tree node.
+type NodeType int
+
+// Type returns itself and provides an easy default implementation
+func (t NodeType) Type() NodeType {
+	return t
+}
+
+func (t NodeType) String() string {
+	return NodeTypeName[t]
+}
+
+const (
+	NodeText NodeType = iota
+	NodeArray
+	NodeList
+	NodeField
+	NodeIdentifier
+	NodeFilter
+	NodeInt
+	NodeFloat
+	NodeWildcard
+	NodeRecursive
+	NodeUnion
+	NodeBool
+)
+
+var NodeTypeName = map[NodeType]string{
+	NodeText:       "NodeText",
+	NodeArray:      "NodeArray",
+	NodeList:       "NodeList",
+	NodeField:      "NodeField",
+	NodeIdentifier: "NodeIdentifier",
+	NodeFilter:     "NodeFilter",
+	NodeInt:        "NodeInt",
+	NodeFloat:      "NodeFloat",
+	NodeWildcard:   "NodeWildcard",
+	NodeRecursive:  "NodeRecursive",
+	NodeUnion:      "NodeUnion",
+	NodeBool:       "NodeBool",
+}
+
+type Node interface {
+	Type() NodeType
+	String() string
+}
+
+// ListNode holds a sequence of nodes.
+type ListNode struct {
+	NodeType
+	Nodes []Node // The element nodes in lexical order.
+}
+
+func newList() *ListNode {
+	return &ListNode{NodeType: NodeList}
+}
+
+func (l *ListNode) append(n Node) {
+	l.Nodes = append(l.Nodes, n)
+}
+
+func (l *ListNode) String() string {
+	return fmt.Sprintf("%s", l.Type())
+}
+
+// TextNode holds plain text.
+type TextNode struct {
+	NodeType
+	Text string // The text; may span newlines.
+}
+
+func newText(text string) *TextNode {
+	return &TextNode{NodeType: NodeText, Text: text}
+}
+
+func (t *TextNode) String() string {
+	return fmt.Sprintf("%s: %s", t.Type(), t.Text)
+}
+
+// FieldNode holds a field of a struct.
+type FieldNode struct {
+	NodeType
+	Value string
+}
+
+func newField(value string) *FieldNode {
+	return &FieldNode{NodeType: NodeField, Value: value}
+}
+
+func (f *FieldNode) String() string {
+	return fmt.Sprintf("%s: %s", f.Type(), f.Value)
+}
+
+// IdentifierNode holds an identifier
+type IdentifierNode struct {
+	NodeType
+	Name string
+}
+
+func newIdentifier(value string) *IdentifierNode {
+	return &IdentifierNode{
+		NodeType: NodeIdentifier,
+		Name:     value,
+	}
+}
+
+func (f *IdentifierNode) String() string {
+	return fmt.Sprintf("%s: %s", f.Type(), f.Name)
+}
+
+// ParamsEntry holds param information for ArrayNode
+type ParamsEntry struct {
+	Value int
+	Known bool // whether the value is known at parse time
+}
+
+// ArrayNode holds start, end, step information for array index selection
+type ArrayNode struct {
+	NodeType
+	Params [3]ParamsEntry // start, end, step
+}
+
+func newArray(params [3]ParamsEntry) *ArrayNode {
+	return &ArrayNode{
+		NodeType: NodeArray,
+		Params:   params,
+	}
+}
+
+func (a *ArrayNode) String() string {
+	return fmt.Sprintf("%s: %v", a.Type(), a.Params)
+}
+
+// FilterNode holds operand and operator information for filter
+type FilterNode struct {
+	NodeType
+	Left     *ListNode
+	Right    *ListNode
+	Operator string
+}
+
+func newFilter(left, right *ListNode, operator string) *FilterNode {
+	return &FilterNode{
+		NodeType: NodeFilter,
+		Left:     left,
+		Right:    right,
+		Operator: operator,
+	}
+}
+
+func (f *FilterNode) String() string {
+	return fmt.Sprintf("%s: %s %s %s", f.Type(), f.Left, f.Operator, f.Right)
+}
+
+// IntNode holds integer value
+type IntNode struct {
+	NodeType
+	Value int
+}
+
+func newInt(num int) *IntNode {
+	return &IntNode{NodeType: NodeInt, Value: num}
+}
+
+func (i *IntNode) String() string {
+	return fmt.Sprintf("%s: %d", i.Type(), i.Value)
+}
+
+// FloatNode holds float value
+type FloatNode struct {
+	NodeType
+	Value float64
+}
+
+func newFloat(num float64) *FloatNode {
+	return &FloatNode{NodeType: NodeFloat, Value: num}
+}
+
+func (i *FloatNode) String() string {
+	return fmt.Sprintf("%s: %f", i.Type(), i.Value)
+}
+
+// WildcardNode means a wildcard
+type WildcardNode struct {
+	NodeType
+}
+
+func newWildcard() *WildcardNode {
+	return &WildcardNode{NodeType: NodeWildcard}
+}
+
+func (i *WildcardNode) String() string {
+	return fmt.Sprintf("%s", i.Type())
+}
+
+// RecursiveNode means a recursive descent operator
+type RecursiveNode struct {
+	NodeType
+}
+
+func newRecursive() *RecursiveNode {
+	return &RecursiveNode{NodeType: NodeRecursive}
+}
+
+func (r *RecursiveNode) String() string {
+	return fmt.Sprintf("%s", r.Type())
+}
+
+// UnionNode is a union of ListNodes.
+type UnionNode struct {
+	NodeType
+	Nodes []*ListNode
+}
+
+func newUnion(nodes []*ListNode) *UnionNode {
+	return &UnionNode{NodeType: NodeUnion, Nodes: nodes}
+}
+
+func (u *UnionNode) String() string {
+	return fmt.Sprintf("%s", u.Type())
+}
+
+// BoolNode holds bool value
+type BoolNode struct {
+	NodeType
+	Value bool
+}
+
+func newBool(value bool) *BoolNode {
+	return &BoolNode{NodeType: NodeBool, Value: value}
+}
+
+func (b *BoolNode) String() string {
+	return fmt.Sprintf("%s: %t", b.Type(), b.Value)
+}
diff --git a/vendor/k8s.io/client-go/util/jsonpath/parser.go b/vendor/k8s.io/client-go/util/jsonpath/parser.go
new file mode 100644
index 00000000..39782afc
--- /dev/null
+++ b/vendor/k8s.io/client-go/util/jsonpath/parser.go
@@ -0,0 +1,449 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package jsonpath
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+const eof = -1
+
+const (
+	leftDelim  = "{"
+	rightDelim = "}"
+)
+
+type Parser struct {
+	Name  string
+	Root  *ListNode
+	input string
+	cur   *ListNode
+	pos   int
+	start int
+	width int
+}
+
+// Parse parses the given text and returns a node Parser.
+// If an error is encountered, parsing stops and a nil
+// Parser is returned with the error.
+func Parse(name, text string) (*Parser, error) {
+	p := NewParser(name)
+	err := p.Parse(text)
+	if err != nil {
+		p = nil
+	}
+	return p, err
+}
+
+func NewParser(name string) *Parser {
+	return &Parser{
+		Name: name,
+	}
+}
+
+// parseAction parses the expression inside the delimiters.
+func parseAction(name, text string) (*Parser, error) {
+	p, err := Parse(name, fmt.Sprintf("%s%s%s", leftDelim, text, rightDelim))
+	// when an error happens, p will be nil, so we need to return here
+	if err != nil {
+		return p, err
+	}
+	p.Root = p.Root.Nodes[0].(*ListNode)
+	return p, nil
+}
+
+func (p *Parser) Parse(text string) error {
+	p.input = text
+	p.Root = newList()
+	p.pos = 0
+	return p.parseText(p.Root)
+}
+
+// consumeText returns the text consumed since the last call to consumeText.
+func (p *Parser) consumeText() string {
+	value := p.input[p.start:p.pos]
+	p.start = p.pos
+	return value
+}
+
+// next returns the next rune in the input.
+func (p *Parser) next() rune {
+	if int(p.pos) >= len(p.input) {
+		p.width = 0
+		return eof
+	}
+	r, w := utf8.DecodeRuneInString(p.input[p.pos:])
+	p.width = w
+	p.pos += p.width
+	return r
+}
+
+// peek returns but does not consume the next rune in the input.
+func (p *Parser) peek() rune {
+	r := p.next()
+	p.backup()
+	return r
+}
+
+// backup steps back one rune. Can only be called once per call of next.
+func (p *Parser) backup() {
+	p.pos -= p.width
+}
+
+func (p *Parser) parseText(cur *ListNode) error {
+	for {
+		if strings.HasPrefix(p.input[p.pos:], leftDelim) {
+			if p.pos > p.start {
+				cur.append(newText(p.consumeText()))
+			}
+			return p.parseLeftDelim(cur)
+		}
+		if p.next() == eof {
+			break
+		}
+	}
+	// Correctly reached EOF.
+	if p.pos > p.start {
+		cur.append(newText(p.consumeText()))
+	}
+	return nil
+}
+
+// parseLeftDelim scans the left delimiter, which is known to be present.
+func (p *Parser) parseLeftDelim(cur *ListNode) error {
+	p.pos += len(leftDelim)
+	p.consumeText()
+	newNode := newList()
+	cur.append(newNode)
+	cur = newNode
+	return p.parseInsideAction(cur)
+}
+
+func (p *Parser) parseInsideAction(cur *ListNode) error {
+	prefixMap := map[string]func(*ListNode) error{
+		rightDelim: p.parseRightDelim,
+		"[?(":      p.parseFilter,
+		"..":       p.parseRecursive,
+	}
+	for prefix, parseFunc := range prefixMap {
+		if strings.HasPrefix(p.input[p.pos:], prefix) {
+			return parseFunc(cur)
+		}
+	}
+
+	switch r := p.next(); {
+	case r == eof || isEndOfLine(r):
+		return fmt.Errorf("unclosed action")
+	case r == ' ':
+		p.consumeText()
+	case r == '@' || r == '$': // the current object, just pass it
+		p.consumeText()
+	case r == '[':
+		return p.parseArray(cur)
+	case r == '"':
+		return p.parseQuote(cur)
+	case r == '.':
+		return p.parseField(cur)
+	case r == '+' || r == '-' || unicode.IsDigit(r):
+		p.backup()
+		return p.parseNumber(cur)
+	case isAlphaNumeric(r):
+		p.backup()
+		return p.parseIdentifier(cur)
+	default:
+		return fmt.Errorf("unrecognized character in action: %#U", r)
+	}
+	return p.parseInsideAction(cur)
+}
+
+// parseRightDelim scans the right delimiter, which is known to be present.
+func (p *Parser) parseRightDelim(cur *ListNode) error {
+	p.pos += len(rightDelim)
+	p.consumeText()
+	cur = p.Root
+	return p.parseText(cur)
+}
+
+// parseIdentifier scans built-in keywords, like "range" and "end".
+func (p *Parser) parseIdentifier(cur *ListNode) error {
+	var r rune
+	for {
+		r = p.next()
+		if isTerminator(r) {
+			p.backup()
+			break
+		}
+	}
+	value := p.consumeText()
+
+	if isBool(value) {
+		v, err := strconv.ParseBool(value)
+		if err != nil {
+			return fmt.Errorf("can not parse bool '%s': %s", value, err.Error())
+		}
+
+		cur.append(newBool(v))
+	} else {
+		cur.append(newIdentifier(value))
+	}
+
+	return p.parseInsideAction(cur)
+}
+
+// parseRecursive scans the recursive descent operator "..".
+func (p *Parser) parseRecursive(cur *ListNode) error {
+	p.pos += len("..")
+	p.consumeText()
+	cur.append(newRecursive())
+	if r := p.peek(); isAlphaNumeric(r) {
+		return p.parseField(cur)
+	}
+	return p.parseInsideAction(cur)
+}
+
+// parseNumber scans a number.
+func (p *Parser) parseNumber(cur *ListNode) error {
+	r := p.peek()
+	if r == '+' || r == '-' {
+		r = p.next()
+	}
+	for {
+		r = p.next()
+		if r != '.' && !unicode.IsDigit(r) {
+			p.backup()
+			break
+		}
+	}
+	value := p.consumeText()
+	i, err := strconv.Atoi(value)
+	if err == nil {
+		cur.append(newInt(i))
+		return p.parseInsideAction(cur)
+	}
+	d, err := strconv.ParseFloat(value, 64)
+	if err == nil {
+		cur.append(newFloat(d))
+		return p.parseInsideAction(cur)
+	}
+	return fmt.Errorf("cannot parse number %s", value)
+}
+
+// parseArray scans an array index selection.
+func (p *Parser) parseArray(cur *ListNode) error {
+Loop:
+	for {
+		switch p.next() {
+		case eof, '\n':
+			return fmt.Errorf("unterminated array")
+		case ']':
+			break Loop
+		}
+	}
+	text := p.consumeText()
+	text = string(text[1 : len(text)-1])
+	if text == "*" {
+		text = ":"
+	}
+
+	// union operator
+	strs := strings.Split(text, ",")
+	if len(strs) > 1 {
+		union := []*ListNode{}
+		for _, str := range strs {
+			parser, err := parseAction("union", fmt.Sprintf("[%s]", strings.Trim(str, " ")))
+			if err != nil {
+				return err
+			}
+			union = append(union, parser.Root)
+		}
+		cur.append(newUnion(union))
+		return p.parseInsideAction(cur)
+	}
+
+	// dict key
+	reg := regexp.MustCompile(`^'([^']*)'$`)
+	value := reg.FindStringSubmatch(text)
+	if value != nil {
+		parser, err := parseAction("arraydict", fmt.Sprintf(".%s", value[1]))
+		if err != nil {
+			return err
+		}
+		for _, node := range parser.Root.Nodes {
+			cur.append(node)
+		}
+		return p.parseInsideAction(cur)
+	}
+
+	// slice operator
+	reg = regexp.MustCompile(`^(-?[\d]*)(:-?[\d]*)?(:[\d]*)?$`)
+	value = reg.FindStringSubmatch(text)
+	if value == nil {
+		return fmt.Errorf("invalid array index %s", text)
+	}
+	value = value[1:]
+	params := [3]ParamsEntry{}
+	for i := 0; i < 3; i++ {
+		if value[i] != "" {
+			if i > 0 {
+				value[i] = value[i][1:]
+			}
+			if i > 0 && value[i] == "" {
+				params[i].Known = false
+			} else {
+				var err error
+				params[i].Known = true
+				params[i].Value, err = strconv.Atoi(value[i])
+				if err != nil {
+					return fmt.Errorf("array index %s is not a number", value[i])
+				}
+			}
+		} else {
+			if i == 1 {
+				params[i].Known = true
+				params[i].Value = params[0].Value + 1
+			} else {
+				params[i].Known = false
+				params[i].Value = 0
+			}
+		}
+	}
+	cur.append(newArray(params))
+	return p.parseInsideAction(cur)
+}
+
+// parseFilter scans a filter inside an array selection.
+func (p *Parser) parseFilter(cur *ListNode) error {
+	p.pos += len("[?(")
+	p.consumeText()
+Loop:
+	for {
+		switch p.next() {
+		case eof, '\n':
+			return fmt.Errorf("unterminated filter")
+		case ')':
+			break Loop
+		}
+	}
+	if p.next() != ']' {
+		return fmt.Errorf("unclosed array expect ]")
+	}
+	reg := regexp.MustCompile(`^([^!<>=]+)([!<>=]+)(.+?)$`)
+	text := p.consumeText()
+	text = string(text[:len(text)-2])
+	value := reg.FindStringSubmatch(text)
+	if value == nil {
+		parser, err := parseAction("text", text)
+		if err != nil {
+			return err
+		}
+		cur.append(newFilter(parser.Root, newList(), "exists"))
+	} else {
+		leftParser, err := parseAction("left", value[1])
+		if err != nil {
+			return err
+		}
+		rightParser, err := parseAction("right", value[3])
+		if err != nil {
+			return err
+		}
+		cur.append(newFilter(leftParser.Root, rightParser.Root, value[2]))
+	}
+	return p.parseInsideAction(cur)
+}
+
+// parseQuote unquotes the string inside double quotes.
+func (p *Parser) parseQuote(cur *ListNode) error {
+Loop:
+	for {
+		switch p.next() {
+		case eof, '\n':
+			return fmt.Errorf("unterminated quoted string")
+		case '"':
+			break Loop
+		}
+	}
+	value := p.consumeText()
+	s, err := strconv.Unquote(value)
+	if err != nil {
+		return fmt.Errorf("unquote string %s error %v", value, err)
+	}
+	cur.append(newText(s))
+	return p.parseInsideAction(cur)
+}
+
+// parseField scans a field until a terminator
+func (p *Parser) parseField(cur *ListNode) error {
+	p.consumeText()
+	for p.advance() {
+	}
+	value := p.consumeText()
+	if value == "*" {
+		cur.append(newWildcard())
+	} else {
+		cur.append(newField(strings.Replace(value, "\\", "", -1)))
+	}
+	return p.parseInsideAction(cur)
+}
+
+// advance scans until the next non-escaped terminator.
+func (p *Parser) advance() bool {
+	r := p.next()
+	if r == '\\' {
+		p.next()
+	} else if isTerminator(r) {
+		p.backup()
+		return false
+	}
+	return true
+}
+
+// isTerminator reports whether the input is at a valid termination character that may appear after an identifier.
+func isTerminator(r rune) bool {
+	if isSpace(r) || isEndOfLine(r) {
+		return true
+	}
+	switch r {
+	case eof, '.', ',', '[', ']', '$', '@', '{', '}':
+		return true
+	}
+	return false
+}
+
+// isSpace reports whether r is a space character.
+func isSpace(r rune) bool {
+	return r == ' ' || r == '\t'
+}
+
+// isEndOfLine reports whether r is an end-of-line character.
+func isEndOfLine(r rune) bool {
+	return r == '\r' || r == '\n'
+}
+
+// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
+func isAlphaNumeric(r rune) bool {
+	return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
+}
+
+// isBool reports whether s is a boolean value.
+func isBool(s string) bool {
+	return s == "true" || s == "false"
+}
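
To make the grammar above concrete, a hedged sketch of templates this parser accepts (example expressions chosen for illustration only; not part of the patch):

	package main

	import (
		"fmt"

		"k8s.io/client-go/util/jsonpath"
	)

	func main() {
		// Representative templates exercising field access, wildcard, slice,
		// union, filter, and recursive descent from node.go / parser.go.
		templates := []string{
			"{.kind}",
			"{.items[*].metadata.name}",
			"{.items[0:3]}",
			"{.items[0,1]}",
			"{.items[?(@.status.phase==\"Running\")].metadata.name}",
			"{..name}",
		}
		for _, t := range templates {
			if _, err := jsonpath.Parse("check", t); err != nil {
				fmt.Printf("invalid template %q: %v\n", t, err)
			} else {
				fmt.Printf("ok: %s\n", t)
			}
		}
	}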
diff --git a/vendor/vendor.json b/vendor/vendor.json
index d81265ac..86fcc9d3 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -2,6 +2,24 @@
 	"comment": "",
 	"ignore": "test",
 	"package": [
+		{
+			"path": "appengine",
+			"revision": ""
+		},
+		{
+			"path": "appengine_internal",
+			"revision": ""
+		},
+		{
+			"path": "appengine_internal/base",
+			"revision": ""
+		},
+		{
+			"checksumSHA1": "AH7jcN7pvaPDU6UjHdpT081DDGk=",
+			"path": "cloud.google.com/go/compute/metadata",
+			"revision": "0dcfca594c1c27abc8ef55a03210e34214f59292",
+			"revisionTime": "2017-07-05T18:41:20Z"
+		},
 		{
 			"checksumSHA1": "NX4v3cbkXAJxFlrncqT9yEUBuoA=",
 			"path": "github.com/PuerkitoBio/purell",
@@ -100,6 +118,12 @@
 			"revision": "23def4e6c14b4da8ac2ed8007337bc5eb5007998",
 			"revisionTime": "2016-01-25T20:49:56Z"
 		},
+		{
+			"checksumSHA1": "qlPUeFabwF4RKAOF1H+yBFU1Veg=",
+			"path": "github.com/golang/protobuf/proto",
+			"revision": "6a1fa9404c0aebf36c879bc50152edcc953910d2",
+			"revisionTime": "2017-06-22T20:25:51Z"
+		},
 		{
 			"checksumSHA1": "fgPBEKvm7D7y4IMxGaxHsPmtYtU=",
 			"path": "github.com/google/gofuzz",
@@ -220,6 +244,18 @@
 			"revision": "0fe963104e9d1877082f8fb38f816fcd97eb1d10",
 			"revisionTime": "2017-05-16T11:44:04Z"
 		},
+		{
+			"checksumSHA1": "Y+HGqEkYM15ir+J93MEaHdyFy0c=",
+			"path": "golang.org/x/net/context",
+			"revision": "054b33e6527139ad5b1ec2f6232c3b175bd9a30c",
+			"revisionTime": "2017-07-05T22:25:54Z"
+		},
+		{
+			"checksumSHA1": "WHc3uByvGaMcnSoI21fhzYgbOgg=",
+			"path": "golang.org/x/net/context/ctxhttp",
+			"revision": "054b33e6527139ad5b1ec2f6232c3b175bd9a30c",
+			"revisionTime": "2017-07-05T22:25:54Z"
+		},
 		{
 			"checksumSHA1": "Wl71V4Lh2iajiRnVAMPtd276vTg=",
 			"path": "golang.org/x/net/http2",
@@ -244,6 +280,36 @@
 			"revision": "84f0e6f92b10139f986b1756e149a7d9de270cdc",
 			"revisionTime": "2017-05-12T21:27:43Z"
 		},
+		{
+			"checksumSHA1": "SjCoL7KD7qBmgSuqGTCAuUhigDk=",
+			"path": "golang.org/x/oauth2",
+			"revision": "cce311a261e6fcf29de72ca96827bdb0b7d9c9e6",
+			"revisionTime": "2017-06-29T19:00:38Z"
+		},
+		{
+			"checksumSHA1": "ma8yoPKIsJlRiVBDUEdX78kyAdU=",
+			"path": "golang.org/x/oauth2/google",
+			"revision": "cce311a261e6fcf29de72ca96827bdb0b7d9c9e6",
+			"revisionTime": "2017-06-29T19:00:38Z"
+		},
+		{
+			"checksumSHA1": "eztsaK5Uim4juQOtqUf6VF+foD4=",
+			"path": "golang.org/x/oauth2/internal",
+			"revision": "cce311a261e6fcf29de72ca96827bdb0b7d9c9e6",
+			"revisionTime": "2017-06-29T19:00:38Z"
+		},
+		{
+			"checksumSHA1": "huVltYnXdRFDJLgp/ZP9IALzG7g=",
+			"path": "golang.org/x/oauth2/jws",
+			"revision": "cce311a261e6fcf29de72ca96827bdb0b7d9c9e6",
+			"revisionTime": "2017-06-29T19:00:38Z"
+		},
+		{
+			"checksumSHA1": "/eV4E08BY+f1ZikiR7OOMJAj3m0=",
+			"path": "golang.org/x/oauth2/jwt",
+			"revision": "cce311a261e6fcf29de72ca96827bdb0b7d9c9e6",
+			"revisionTime": "2017-06-29T19:00:38Z"
+		},
 		{
 			"checksumSHA1": "5VxR5SMHHsE7QM8kwrc8ZckU0TU=",
 			"path": "golang.org/x/sys/unix",
@@ -280,6 +346,66 @@
 			"revision": "19e51611da83d6be54ddafce4a4af510cb3e9ea4",
 			"revisionTime": "2017-04-21T08:09:44Z"
 		},
+		{
+			"checksumSHA1": "WPEbk80NB3Esdh4Yk0PXr2K7xVU=",
+			"path": "google.golang.org/appengine",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "4o2JkeR2LyUfZ7BQIzHUejyqKno=",
+			"path": "google.golang.org/appengine/internal",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=",
+			"path": "google.golang.org/appengine/internal/app_identity",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=",
+			"path": "google.golang.org/appengine/internal/base",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=",
+			"path": "google.golang.org/appengine/internal/datastore",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "Gep2T9zmVYV8qZfK2gu3zrmG6QE=",
+			"path": "google.golang.org/appengine/internal/log",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=",
+			"path": "google.golang.org/appengine/internal/modules",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=",
+			"path": "google.golang.org/appengine/internal/remote_api",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=",
+			"path": "google.golang.org/appengine/internal/urlfetch",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
+		{
+			"checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=",
+			"path": "google.golang.org/appengine/urlfetch",
+			"revision": "d11fc8a8dccf41ab5bd3c51a3ea52051240e325c",
+			"revisionTime": "2017-06-30T20:53:15Z"
+		},
 		{
 			"checksumSHA1": "6f8MEU31llHM1sLM/GGH4/Qxu0A=",
 			"path": "gopkg.in/inf.v0",
@@ -590,6 +716,12 @@
 			"version": "v3.0",
 			"versionExact": "v3.0.0-beta.0"
 		},
+		{
+			"checksumSHA1": "2BDblVF8oc5+YR6c625SC0JnnUo=",
+			"path": "k8s.io/client-go/plugin/pkg/client/auth/gcp",
+			"revision": "6daa6a29b8b866679a5be039ad0c8149de1cd60a",
+			"revisionTime": "2017-07-05T23:52:01Z"
+		},
 		{
 			"checksumSHA1": "WKKCrNPVWbJYm1im1DA9145z6xI=",
 			"path": "k8s.io/client-go/rest",
@@ -606,6 +738,12 @@
 			"version": "v3.0",
 			"versionExact": "v3.0.0-beta.0"
 		},
+		{
+			"checksumSHA1": "PmCh3R76TMf8+0P5Jek4fsBO2hA=",
+			"path": "k8s.io/client-go/third_party/forked/golang/template",
+			"revision": "6daa6a29b8b866679a5be039ad0c8149de1cd60a",
+			"revisionTime": "2017-07-05T23:52:01Z"
+		},
 		{
 			"checksumSHA1": "BjbzPnaU9672noMuSuqr0JIg344=",
 			"path": "k8s.io/client-go/tools/auth",
@@ -701,6 +839,12 @@
 			"revisionTime": "2017-03-31T20:34:26Z",
 			"version": "v3.0",
 			"versionExact": "v3.0.0-beta.0"
+		},
+		{
+			"checksumSHA1": "GsIX2f2J67NXq1HBiSEQh0SuTB4=",
+			"path": "k8s.io/client-go/util/jsonpath",
+			"revision": "6daa6a29b8b866679a5be039ad0c8149de1cd60a",
+			"revisionTime": "2017-07-05T23:52:01Z"
 		}
 	],
 	"rootPath": "github.com/ksonnet/kubecfg"
-- 
GitLab